// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantic
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries do not change the lower 4 bits. This does
 * not affect any other platform. Also avoid the 62nd bit on ppc64 that is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif /* __BITS_PER_LONG == 64 */
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)

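/*
 * RANDOM_ORVALUE is ORed into live entries before the pxx_clear() tests
 * to verify that clearing works even on garbage values, while
 * RANDOM_NZVALUE seeds a non-zero pattern for the pxx_same() checks.
 */
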
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

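/*
 * pud_pfn/pmd_pfn/pte_pfn refer to pages actually allocated in init_args()
 * and may therefore be accessed by the tests, while the fixed_*_pfn values
 * are only derived from a kernel text symbol and are used solely for
 * constructing page table entries.
 */
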
static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

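/*
 * The advanced tests below operate on live page table entries and must be
 * called with the page table lock held; debug_vm_pgtable() takes it via
 * pte_offset_map_lock() before invoking them.
 */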
static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flush.
	 * This requires set_pte_at() not to be used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

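/*
 * Saved write state only exists with CONFIG_NUMA_BALANCING: it remembers
 * whether an entry was writable while the entry is temporarily made
 * PROT_NONE, which is why debug_vm_pgtable() runs the savedwrite tests
 * with the protnone protection value.
 */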
static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

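/*
 * Besides the pte-style helper checks, pmd_advanced_tests() also exercises
 * the THP deposit/withdraw protocol: a page table is deposited before the
 * PMD entries are installed and must be withdrawn again afterwards.
 */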
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(pfn, prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

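/*
 * PUD level THP helpers only exist on architectures selecting
 * CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD; all others get the empty
 * stubs from the #else branch further down.
 */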
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(pfn, prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

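/*
 * pmd/pud_set_huge() back huge vmalloc() and ioremap() mappings. The huge
 * vmap tests verify that a cleared entry can be turned into a huge leaf
 * and then cleared back to none.
 */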
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(prot))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(prot))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

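/*
 * The P4D and PGD checks stop at pxx_same() because these levels are
 * folded on many configurations and no entries are created from a
 * protection value and a pfn; see the comment in debug_vm_pgtable().
 */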
static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

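/*
 * Soft dirty bits let userspace (e.g. CRIU) track writes via
 * /proc/<pid>/clear_refs; both the present and the swap entry formats
 * must preserve the bit across the set/clear helpers tested below.
 */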
static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

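/*
 * The swap tests convert an entry into the architecture swap entry format
 * and back, verifying that the pfn survives the round trip; swap entries
 * are stored directly in the page table.
 */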
static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

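/*
 * Migration entries are swap-format entries that block faults while a
 * page is being moved; the writable and readable variants must remain
 * distinguishable so that the original permission can be restored.
 */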
static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_writable_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_pages(page, 0);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

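/*
 * PUD-sized allocations typically exceed MAX_ORDER, so they cannot come
 * from the buddy allocator; for such orders this helper tries
 * alloc_contig_pages(), which is only available with CONFIG_CONTIG_ALLOC.
 */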
static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order >= MAX_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order < MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	phys_addr_t phys;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VMFLAGS);
	args->page_prot_none = __P000;
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	/*
	 * The pfn for mapping at PTE level is determined from a standard
	 * kernel text symbol, but pfns for the higher page table levels are
	 * derived by masking lower bits of this real pfn. These derived pfns
	 * might not exist on the platform, but that does not really matter
	 * as the pfn_pxx() helpers will still create appropriate entries for
	 * the test. This helps avoid allocating large memory blocks for
	 * mappings at higher page table levels in some of the tests.
	 */
	phys = __pa_symbol(&start_kernel);
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_pages(GFP_KERNEL, 0);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * The pfn for mapping at PTE level is determined from a standard
	 * kernel text symbol, but pfns for the higher page table levels are
	 * derived by masking lower bits of this real pfn. These derived pfns
	 * might not exist on the platform, but that does not really matter
	 * as the pfn_pxx() helpers will still create appropriate entries for
	 * the test. This helps avoid allocating large memory blocks for
	 * mappings at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over protection_map[] to make sure that all
	 * the basic page table transformation validations just hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out from
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
	pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pte_unmap_unlock(ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);