/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *vma);
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmd,
                                   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                           pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
                 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
                 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
                    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn, using @vmf->vma->vm_page_prot as the page
 * protection. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
                                            bool write)
{
        return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn, using @vmf->vma->vm_page_prot as the page
 * protection. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
                                            bool write)
{
        return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
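
/*
 * Illustrative sketch (not part of this header): a driver's
 * ->huge_fault() handler might service a PMD-sized fault with
 * vmf_insert_pfn_pmd(). my_dev_phys_base() is a hypothetical helper
 * returning the device's physical base address; the rest is existing
 * kernel API.
 *
 *      static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf,
 *                                          enum page_entry_size pe_size)
 *      {
 *              phys_addr_t phys = my_dev_phys_base() +
 *                                 ((phys_addr_t)vmf->pgoff << PAGE_SHIFT);
 *              pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);
 *
 *              if (pe_size != PE_SIZE_PMD)
 *                      return VM_FAULT_FALLBACK;
 *              return vmf_insert_pfn_pmd(vmf, pfn,
 *                                        vmf->flags & FAULT_FLAG_WRITE);
 *      }
 *
 * vmf_insert_pfn_pud() is used the same way for PE_SIZE_PUD faults.
 */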

enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_NEVER_DAX,
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};
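
/*
 * For reference: the sysfs control
 * /sys/kernel/mm/transparent_hugepage/enabled maps onto these bits as
 * "always" -> TRANSPARENT_HUGEPAGE_FLAG, "madvise" ->
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG and "never" -> neither bit set;
 * the DEFRAG_* bits are driven by the sibling "defrag" file.
 */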

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count,
                                   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf,
                                  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))
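
/*
 * Worked example: on x86-64 with 4KiB base pages, PMD_SHIFT is 21 and
 * PUD_SHIFT is 30, giving HPAGE_PMD_SIZE = 2MiB (HPAGE_PMD_ORDER = 9,
 * HPAGE_PMD_NR = 512) and HPAGE_PUD_SIZE = 1GiB.
 */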

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on VMAs which are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        /*
         * Bail out if the hardware/firmware has marked hugepage
         * support as disabled.
         */
        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
                return false;

        if (vma->vm_flags & VM_NOHUGEPAGE)
                return false;

        if (vma_is_temporary_stack(vma))
                return false;

        if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;

        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                return true;

        if (vma_is_dax(vma))
                return true;

        if (transparent_hugepage_flags &
                                (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
                return !!(vma->vm_flags & VM_HUGEPAGE);

        return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long haddr)
{
        /* Don't have to check pgoff for anonymous vma */
        if (!vma_is_anonymous(vma)) {
                if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
                                HPAGE_PMD_NR))
                        return false;
        }

        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return false;
        return true;
}
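
/*
 * Worked example for the alignment check above: a file-backed VMA with
 * vm_start = 0x600000 and vm_pgoff = 0 on x86-64 gives
 * (vm_start >> PAGE_SHIFT) - vm_pgoff = 0x600, a multiple of
 * HPAGE_PMD_NR (512 = 0x200), so file offsets and virtual addresses
 * agree at PMD granularity and a huge mapping may be attempted.
 */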

#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)   \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        } while (0)
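
/*
 * Illustrative sketch: a page-table walker that can only handle base
 * pages typically forces a huge PMD back to PTEs first, then rechecks
 * the PMD before walking it:
 *
 *      split_huge_pmd(vma, pmd, addr);
 *      if (pmd_trans_unstable(pmd))
 *              return 0;       // PMD changed under us, caller retries
 */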

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud)                            \
                                        || pud_devmap(*____pud))        \
                        __split_huge_pud(__vma, __pud, __address);      \
        } while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
                     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
        return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}
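
/*
 * Illustrative sketch of the canonical locking pattern (see e.g.
 * mm/mincore.c): a non-NULL return means the huge entry is stable and
 * the caller holds the lock; a NULL return means it must fall back to
 * pte-level handling.
 *
 *      ptl = pmd_trans_huge_lock(pmd, vma);
 *      if (ptl) {
 *              ...operate on the huge pmd...
 *              spin_unlock(ptl);
 *              return 0;
 *      }
 *      ...pte-level handling...
 */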

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
        return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        if (PageHead(page))
                return HPAGE_PMD_ORDER;
        return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        if (PageHead(page))
                return HPAGE_PMD_NR;
        return 1;
}
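
/*
 * For a PMD-mapped THP on a 4KiB-page system, thp_order() returns 9
 * and thp_nr_pages() returns 512; for a regular page they return 0
 * and 1 respectively.
 */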

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
        return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
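
/*
 * Illustrative sketch: a read fault on an anonymous VMA can be served
 * with the shared huge zero page instead of allocating memory:
 *
 *      struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);
 *      if (!zero_page)
 *              return VM_FAULT_FALLBACK;
 *      // map it read-only; the mm's reference is dropped again via
 *      // mm_put_huge_zero_page() when the mm goes away
 */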

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
        /*
         * The global or per-memcg deferred list is kept in the second
         * tail page; its first word cannot be used, as the first word
         * of every tail page is occupied by compound_head.
         */
        return &page[2].deferred_list;
}
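
/*
 * For reference: when part of a THP is unmapped, the page is queued
 * via deferred_split_huge_page() and the deferred-split shrinker later
 * walks these page_deferred_list() entries under memory pressure,
 * splitting the pages so that unused tail pages can be reclaimed.
 */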

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline struct page *thp_head(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        return page;
}

static inline unsigned int thp_order(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        return 0;
}

static inline int thp_nr_pages(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long haddr)
{
        return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
        return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area   NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
        BUILD_BUG();
        return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pud, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
        return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
                pmd_t orig_pmd)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
        unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
        unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline bool thp_migration_supported(void)
{
        return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
        return PAGE_SIZE << thp_order(page);
}
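
/*
 * Equivalently, thp_size(page) == thp_nr_pages(page) * PAGE_SIZE:
 * 2MiB for a PMD-mapped THP with 4KiB base pages, PAGE_SIZE for a
 * regular page.
 */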

#endif /* _LINUX_HUGE_MM_H */