/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;
#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page than
 * the head struct page can hold, so we have to abuse other tail struct pages
 * to store the metadata.
 */
#define __NR_USED_SUBPAGE 3
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
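/*
 * Illustrative sketch (not part of the kernel API): a hypothetical helper
 * showing the [from, to) arithmetic described above. With from == 0 and
 * to == 4, the half-open interval spans 4(to) - 0(from) = 4 huge pages:
 *
 *	static long region_span(struct file_region *rg)
 *	{
 *		return rg->to - rg->from;	// huge pages in [from, to)
 *	}
 */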
struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
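/*
 * Usage sketch (hypothetical caller, for illustration only): walking all
 * registered huge page sizes with for_each_hstate:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: order %u\n", h->name, huge_page_order(h));
 */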
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				unsigned long address, unsigned int flags);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep,
				bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
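/*
 * Usage sketch (illustrative, mirroring how callers in mm/hugetlb.c use the
 * table): faults on the same (mapping, index) pair serialize on one mutex:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	// ... instantiate the page for (mapping, idx) ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */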
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				unsigned long address, unsigned int flags)
{
	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}
static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *locked)
{
	BUILD_BUG();
	return 0;
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}
static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}
#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep,
						bool wp_copy)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */
static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
					struct folio *new_folio, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
#endif /* !CONFIG_HUGETLB_PAGE */

/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif
#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};
/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static __always_inline						\
bool folio_test_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		return test_bit(HPG_##flname, private);	\
	}							\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_set_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		set_bit(HPG_##flname, private);			\
	}							\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_clear_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		clear_bit(HPG_##flname, private);		\
	}							\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline bool						\
folio_test_hugetlb_##flname(struct folio *folio)		\
	{ return 0; }						\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void						\
folio_set_hugetlb_##flname(struct folio *folio)			\
	{ }							\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void						\
folio_clear_hugetlb_##flname(struct folio *folio)		\
	{ }							\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\
/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
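/*
 * For illustration: HPAGEFLAG(Freed, freed) above generates, among others,
 * folio_test_hugetlb_freed(), folio_set_hugetlb_freed() and
 * folio_clear_hugetlb_freed(). A hypothetical caller, honouring the
 * HPG_freed synchronization rules documented above, would look like:
 *
 *	spin_lock_irq(&hugetlb_lock);
 *	if (folio_test_hugetlb_freed(folio))
 *		;	// folio sits on a free list
 *	spin_unlock_irq(&hugetlb_lock);
 */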
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return folio->_hugetlb_subpool;
}

/*
 * hugetlb page subpool pointer located in hpage[2].hugetlb_subpool
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return hugetlb_folio_subpool(page_folio(hpage));
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
{
	folio->_hugetlb_subpool = subpool;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	hugetlb_set_folio_subpool(page_folio(hpage), subpool);
}
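/*
 * Illustrative pairing of the accessors above (a sketch, not a real kernel
 * path): the subpool is stashed at allocation time and read back on free:
 *
 *	hugetlb_set_folio_subpool(folio, spool);	// at allocation
 *	...
 *	spool = hugetlb_folio_subpool(folio);		// at free time
 */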
static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}
static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
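/*
 * Worked example (assuming the default x86-64 2MB hstate, order == 9, with
 * 4KB base pages; for illustration only): huge_page_size() = 4KB << 9 = 2MB,
 * huge_page_shift() = 9 + 12 = 21, pages_per_huge_page() = 512, and
 * blocks_per_huge_page() = 2MB / 512 = 4096 512-byte sectors.
 */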
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif
static inline struct hstate *folio_hstate(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
}

static inline struct hstate *page_hstate(struct page *page)
{
	return folio_hstate(page_folio(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
#ifdef CONFIG_MEMORY_FAILURE
extern void hugetlb_clear_page_hwpoison(struct page *hpage);
#else
static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}
/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed on
 * movable zone or not. Movability of any huge page should be
 * required only if huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;

	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
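/*
 * Usage sketch (illustrative; 'h' and 'nid' are assumed to be a valid hstate
 * and node id): a migration caller pinning the allocation to one node while
 * keeping the movability bits chosen by htlb_alloc_mask():
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NOWARN);
 *	struct page *page = alloc_huge_page_nodemask(h, nid, NULL, gfp);
 */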
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
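/*
 * Illustrative protection-change sequence (a sketch of how a caller such as
 * hugetlb_change_protection() might pair start/commit; the split lets
 * architectures batch the intermediate TLB work):
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */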
#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

#else /* CONFIG_HUGETLB_PAGE */

struct hstate {};
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return NULL;
}

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}
static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}
static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
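/*
 * Usage sketch (illustrative): the typical lookup-and-lock pattern built on
 * huge_pte_offset() and huge_pte_lock(); 'h', 'mm' and 'addr' are assumed:
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *
 *	if (ptep) {
 *		spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *		// ... operate on *ptep under the split PTL ...
 *		spin_unlock(ptl);
 *	}
 */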
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */