/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn, using the vma's vm_page_prot as the page
 * protection. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn, using the vma's vm_page_prot as the page
 * protection. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
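
/*
 * Illustrative sketch (not part of this header) of how a driver's huge
 * fault handler might use the wrappers above; "my_dev_pfn()" is a
 * hypothetical helper returning a suitably aligned pfn_t:
 *
 *	static vm_fault_t my_huge_fault(struct vm_fault *vmf,
 *					enum page_entry_size pe_size)
 *	{
 *		if (pe_size == PE_SIZE_PMD)
 *			return vmf_insert_pfn_pmd(vmf, my_dev_pfn(vmf), true);
 *		return VM_FAULT_FALLBACK;
 *	}
 */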

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};
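
/*
 * These values are bit numbers in transparent_hugepage_flags (declared
 * below for the CONFIG_TRANSPARENT_HUGEPAGE case). Sketch of how a flag
 * is tested, mirroring the helpers later in this header:
 *
 *	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
 *		... THP is enabled system-wide ("always") ...
 */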

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
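
/*
 * Worked example: with 4KiB base pages and 2MiB PMD-sized pages (as on
 * x86-64), HPAGE_PMD_ORDER = 21 - 12 = 9, so HPAGE_PMD_NR = 512 base
 * pages per PMD-sized huge page.
 */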

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
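
/*
 * Worked example of the file-backed check above, assuming 4KiB pages:
 * a VMA mapping file offset 0 (vm_pgoff == 0) at vm_start == 0x200000
 * gives (vm_start >> PAGE_SHIFT) - vm_pgoff == 512, which is aligned to
 * HPAGE_PMD_NR (512), so PMD mappings are possible; the same mapping at
 * vm_start == 0x201000 would not be.
 */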

static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
					  unsigned long vm_flags)
{
	/* Explicitly disabled through madvise. */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	return true;
}

/*
 * To be used on VMAs which are known to support THP.
 * Use transparent_hugepage_active() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/*
	 * Bail out if hardware/firmware has marked hugepage support
	 * as disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	if (!transhuge_vma_enabled(vma, vma->vm_flags))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}
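
/*
 * Typical use (sketch): the anonymous fault path checks this before
 * trying to install a huge pmd, roughly:
 *
 *	if (__transparent_hugepage_enabled(vma) &&
 *	    transhuge_vma_suitable(vma, haddr))
 *		... try do_huge_pmd_anonymous_page(vmf) ...
 */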

bool transparent_hugepage_active(struct vm_area_struct *vma);

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
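
/*
 * Usage sketch: the caller must hold the page lock, and 0 means the
 * compound page was split into base pages:
 *
 *	lock_page(page);
 *	ret = split_huge_page(page);
 *	unlock_page(page);
 *	if (ret)
 *		... the page is still huge, e.g. due to extra pins ...
 */
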
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
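
/*
 * Sketch: callers typically split a huge pmd back to ptes before doing
 * a pte-level walk over the range, e.g.
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	... walk the ptes ...
 */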

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)
221
222 int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
223                      int advice);
224 void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
225                            unsigned long end, long adjust_next);
226 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
227 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
228
229 static inline int is_swap_pmd(pmd_t pmd)
230 {
231         return !pmd_none(pmd) && !pmd_present(pmd);
232 }
233
/*
 * mmap_lock must be held on entry. Returns the pmd lock held if the pmd
 * is huge (transparent huge, swap/migration, or devmap), or NULL if it
 * is not; the caller must unlock it after use.
 */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
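
/*
 * Typical caller pattern (sketch): take the huge path under the
 * returned ptl, otherwise fall back to a pte-level walk:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge (or swap/devmap) pmd ...
 *		spin_unlock(ptl);
 *	} else {
 *		... the pmd is (or became) not huge; use the pte path ...
 *	}
 */
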
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	/* Check pmd_present() first: pmd_pfn() is only meaningful for present entries. */
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
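
/*
 * Sketch of the zero-page helpers' intended use: a read fault can map
 * the shared huge zero page instead of allocating (roughly what
 * do_huge_pmd_anonymous_page() does):
 *
 *	zero_page = mm_get_huge_zero_page(vma->vm_mm);
 *	if (!zero_page)
 *		... fall back to allocating a real huge page ...
 *
 * The reference taken here is dropped via mm_put_huge_zero_page() when
 * the mm goes away.
 */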

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
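
/*
 * Sketch: migration code uses this as a guard before relying on pmd
 * migration entries, e.g.
 *
 *	if (thp_migration_supported())
 *		... install a pmd migration entry instead of splitting ...
 */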

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The global or memcg deferred list is kept in the second tail
	 * page; deferred_list is laid out to avoid the leading words of
	 * each tail page, which are occupied by compound_head.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
					  unsigned long vm_flags)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pud, __address) \
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}
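
/*
 * Usage sketch, mirroring split_huge_page() above: the folio must be
 * locked, and passing a NULL list keeps the resulting tail pages on
 * the LRU:
 *
 *	folio_lock(folio);
 *	err = split_folio_to_list(folio, NULL);
 *	folio_unlock(folio);
 */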

#endif /* _LINUX_HUGE_MM_H */