/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()				\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))

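/*
 * Illustrative note (not part of this header): the sysfs knob
 * /sys/kernel/mm/transparent_hugepage/enabled drives these bits roughly
 * as follows, so hugepage_flags_enabled() is false only for "never" and
 * hugepage_flags_always() is true only for "always":
 *
 *	"always"	-> TRANSPARENT_HUGEPAGE_FLAG set
 *	"madvise"	-> TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG set
 *	"never"		-> neither bit set
 */
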
/*
 * Do the below checks:
 *   - For file vma, check if the linear page offset of vma is
 *     HPAGE_PMD_NR aligned within the file.  The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     check that the PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file, else the hugepage will
 *     not be PMD-mappable.
 *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
 *     area.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

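/*
 * Worked example (illustrative, assuming 4K pages and 2M PMDs, i.e.
 * HPAGE_PMD_NR == 512): a file VMA with vm_start == 0x200000 and
 * vm_pgoff == 512 passes the IS_ALIGNED() check above, since
 * (0x200000 >> PAGE_SHIFT) - 512 == 0.  The same VMA with vm_pgoff == 1
 * would fail, because the PMD-aligned address 0x200000 would then map
 * the non-PMD-aligned file offset 0x1000.
 */
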
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs);

#define transparent_hugepage_use_zero_page()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)

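/*
 * Usage sketch (illustrative): callers that need to work on the range
 * at PTE granularity split the PMD first, e.g.:
 *
 *	pmd_t *pmd = pmd_offset(pud, address);
 *
 *	split_huge_pmd(vma, pmd, address);
 *	...after this, *pmd is none or a page-table pointer, never huge...
 *
 * The is_swap_pmd()/pmd_trans_huge()/pmd_devmap() checks above make the
 * macro a no-op for PMDs that are not huge.
 */
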
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

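/*
 * Typical calling pattern (sketch; mmap_lock must already be held):
 *
 *	spinlock_t *ptl;
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		...operate on the now-stable huge (or swap/devmap) PMD...
 *		spin_unlock(ptl);
 *	} else {
 *		...not huge: fall back to the PTE-level path...
 *	}
 */
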
/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

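/*
 * Example (illustrative): with 4K pages and 2M PMDs, HPAGE_PMD_ORDER is
 * 9, so folio_test_pmd_mappable() is true for any folio of at least 512
 * pages (2M).
 */
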
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags, bool smaps,
				      bool in_pf, bool enforce_sysfs)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

static inline int split_folio(struct folio *folio)
{
	return split_folio_to_list(folio, NULL);
}

/*
 * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
 * limitations in the implementation like arm64 MTE can override this to
 * false
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif

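/*
 * Example override (sketch of the arm64 approach; MTE tags are managed
 * per page, so whole-THP swapout is disabled there):
 *
 *	static inline bool arch_thp_swp_supported(void)
 *	{
 *		return !system_supports_mte();
 *	}
 *	#define arch_thp_swp_supported arch_thp_swp_supported
 */
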
#endif /* _LINUX_HUGE_MM_H */