// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the
 * semantics expectations that are being validated here. All future
 * changes here or in the documentation need to be kept in sync.
 */

#define VMFLAGS (VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry's type. But these bits might affect the ability to
 * clear entries with pxx_clear() because of how dynamic page table
 * folding works on s390. So while loading up the entries, do not change
 * the lower 4 bits. This does not affect any other platform. Also avoid
 * the 62nd bit on ppc64, which is used to mark a pte entry.
 */
#define S390_SKIP_MASK          GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK         GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK         0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE  GENMASK(7, 0)

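/*
 * For illustration only (not used below): on a 64-bit platform the two
 * skip masks above clear bits 3..0 and bit 62, so RANDOM_ORVALUE works
 * out to 0xbffffffffffffff0 while RANDOM_NZVALUE is simply 0xff.
 */
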
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        pr_debug("Validating PTE basic\n");
        WARN_ON(!pte_same(pte, pte));
        WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
        WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
        WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
        WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
        WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
        WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}

static void __init pte_advanced_tests(struct mm_struct *mm,
                                      struct vm_area_struct *vma, pte_t *ptep,
                                      unsigned long pfn, unsigned long vaddr,
                                      pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        /*
         * Architectures optimize set_pte_at() by avoiding TLB flush.
         * This requires set_pte_at() to not be used to update an
         * existing pte entry. Clear the pte before we do set_pte_at().
         */

        pr_debug("Validating PTE advanced\n");
        pte = pfn_pte(pfn, prot);
        set_pte_at(mm, vaddr, ptep, pte);
        ptep_set_wrprotect(mm, vaddr, ptep);
        pte = ptep_get(ptep);
        WARN_ON(pte_write(pte));
        ptep_get_and_clear(mm, vaddr, ptep);
        pte = ptep_get(ptep);
        WARN_ON(!pte_none(pte));

        pte = pfn_pte(pfn, prot);
        pte = pte_wrprotect(pte);
        pte = pte_mkclean(pte);
        set_pte_at(mm, vaddr, ptep, pte);
        pte = pte_mkwrite(pte);
        pte = pte_mkdirty(pte);
        ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
        pte = ptep_get(ptep);
        WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
        ptep_get_and_clear_full(mm, vaddr, ptep, 1);
        pte = ptep_get(ptep);
        WARN_ON(!pte_none(pte));

        pte = pfn_pte(pfn, prot);
        pte = pte_mkyoung(pte);
        set_pte_at(mm, vaddr, ptep, pte);
        ptep_test_and_clear_young(vma, vaddr, ptep);
        pte = ptep_get(ptep);
        WARN_ON(pte_young(pte));
}

static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
                return;

        pr_debug("Validating PTE saved write\n");
        WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
        WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd = pfn_pmd(pfn, prot);

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD basic\n");
        WARN_ON(!pmd_same(pmd, pmd));
        WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
        WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
        WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
        WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
        WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
        WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
        /*
         * A huge page does not point to the next level page table
         * page. Hence this must qualify as pmd_bad().
         */
        WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}
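
/*
 * As one concrete example (x86): pmd_mkhuge() sets _PAGE_PSE, so the
 * pmd flags no longer match _KERNPG_TABLE and pmd_bad() returns true,
 * which is what the assertion above relies on. Other architectures
 * implement the same semantic with different bits.
 */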

static void __init pmd_advanced_tests(struct mm_struct *mm,
                                      struct vm_area_struct *vma, pmd_t *pmdp,
                                      unsigned long pfn, unsigned long vaddr,
                                      pgprot_t prot, pgtable_t pgtable)
{
        pmd_t pmd = pfn_pmd(pfn, prot);

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD advanced\n");
        /* Align the address wrt HPAGE_PMD_SIZE */
        vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;

        pgtable_trans_huge_deposit(mm, pmdp, pgtable);

        pmd = pfn_pmd(pfn, prot);
        set_pmd_at(mm, vaddr, pmdp, pmd);
        pmdp_set_wrprotect(mm, vaddr, pmdp);
        pmd = READ_ONCE(*pmdp);
        WARN_ON(pmd_write(pmd));
        pmdp_huge_get_and_clear(mm, vaddr, pmdp);
        pmd = READ_ONCE(*pmdp);
        WARN_ON(!pmd_none(pmd));

        pmd = pfn_pmd(pfn, prot);
        pmd = pmd_wrprotect(pmd);
        pmd = pmd_mkclean(pmd);
        set_pmd_at(mm, vaddr, pmdp, pmd);
        pmd = pmd_mkwrite(pmd);
        pmd = pmd_mkdirty(pmd);
        pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
        pmd = READ_ONCE(*pmdp);
        WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
        pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
        pmd = READ_ONCE(*pmdp);
        WARN_ON(!pmd_none(pmd));

        pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
        pmd = pmd_mkyoung(pmd);
        set_pmd_at(mm, vaddr, pmdp, pmd);
        pmdp_test_and_clear_young(vma, vaddr, pmdp);
        pmd = READ_ONCE(*pmdp);
        WARN_ON(pmd_young(pmd));

        /* Clear the pmd entry and withdraw the deposited pgtable */
        pmdp_huge_get_and_clear(mm, vaddr, pmdp);
        pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd = pfn_pmd(pfn, prot);

        pr_debug("Validating PMD leaf\n");
        /*
         * PMD based THP is a leaf entry.
         */
        pmd = pmd_mkhuge(pmd);
        WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd;

        if (!arch_ioremap_pmd_supported())
                return;

        pr_debug("Validating PMD huge\n");
        /*
         * X86's pmd_set_huge() implementation verifies that the given
         * PMD is not a populated non-leaf entry.
         */
        WRITE_ONCE(*pmdp, __pmd(0));
        WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
        WARN_ON(!pmd_clear_huge(pmdp));
        pmd = READ_ONCE(*pmdp);
        WARN_ON(!pmd_none(pmd));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd = pfn_pmd(pfn, prot);

        if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
                return;

        pr_debug("Validating PMD saved write\n");
        WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
        WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
                                   pgprot_t prot)
{
        pud_t pud = pfn_pud(pfn, prot);

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PUD basic\n");
        WARN_ON(!pud_same(pud, pud));
        WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
        WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
        WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
        WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

        if (mm_pmd_folded(mm))
                return;

        /*
         * A huge page does not point to the next level page table
         * page. Hence this must qualify as pud_bad().
         */
        WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
                                      struct vm_area_struct *vma, pud_t *pudp,
                                      unsigned long pfn, unsigned long vaddr,
                                      pgprot_t prot)
{
        pud_t pud = pfn_pud(pfn, prot);

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PUD advanced\n");
        /* Align the address wrt HPAGE_PUD_SIZE */
        vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;

        set_pud_at(mm, vaddr, pudp, pud);
        pudp_set_wrprotect(mm, vaddr, pudp);
        pud = READ_ONCE(*pudp);
        WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
        pudp_huge_get_and_clear(mm, vaddr, pudp);
        pud = READ_ONCE(*pudp);
        WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
        pud = pfn_pud(pfn, prot);
        pud = pud_wrprotect(pud);
        pud = pud_mkclean(pud);
        set_pud_at(mm, vaddr, pudp, pud);
        pud = pud_mkwrite(pud);
        pud = pud_mkdirty(pud);
        pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
        pud = READ_ONCE(*pudp);
        WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
        pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
        pud = READ_ONCE(*pudp);
        WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

        pud = pfn_pud(pfn, prot);
        pud = pud_mkyoung(pud);
        set_pud_at(mm, vaddr, pudp, pud);
        pudp_test_and_clear_young(vma, vaddr, pudp);
        pud = READ_ONCE(*pudp);
        WARN_ON(pud_young(pud));

        pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
        pud_t pud = pfn_pud(pfn, prot);

        pr_debug("Validating PUD leaf\n");
        /*
         * PUD based THP is a leaf entry.
         */
        pud = pud_mkhuge(pud);
        WARN_ON(!pud_leaf(pud));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
        pud_t pud;

        if (!arch_ioremap_pud_supported())
                return;

        pr_debug("Validating PUD huge\n");
        /*
         * X86's pud_set_huge() implementation verifies that the given
         * PUD is not a populated non-leaf entry.
         */
        WRITE_ONCE(*pudp, __pud(0));
        WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
        WARN_ON(!pud_clear_huge(pudp));
        pud = READ_ONCE(*pudp);
        WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
                                   pgprot_t prot)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
                                      struct vm_area_struct *vma, pud_t *pudp,
                                      unsigned long pfn, unsigned long vaddr,
                                      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn,
                                   pgprot_t prot)
{
}
static void __init pmd_advanced_tests(struct mm_struct *mm,
                                      struct vm_area_struct *vma, pmd_t *pmdp,
                                      unsigned long pfn, unsigned long vaddr,
                                      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
                                      struct vm_area_struct *vma, pud_t *pudp,
                                      unsigned long pfn, unsigned long vaddr,
                                      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
        p4d_t p4d;

        pr_debug("Validating P4D basic\n");
        memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
        WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
        pgd_t pgd;

        pr_debug("Validating PGD basic\n");
        memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
        WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
        pud_t pud = READ_ONCE(*pudp);

        if (mm_pmd_folded(mm))
                return;

        pr_debug("Validating PUD clear\n");
        pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
        WRITE_ONCE(*pudp, pud);
        pud_clear(pudp);
        pud = READ_ONCE(*pudp);
        WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
                                      pmd_t *pmdp)
{
        pud_t pud;

        if (mm_pmd_folded(mm))
                return;

        pr_debug("Validating PUD populate\n");
        /*
         * This entry points to the next level page table page.
         * Hence this must not qualify as pud_bad().
         */
        pud_populate(mm, pudp, pmdp);
        pud = READ_ONCE(*pudp);
        WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
                                      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
        p4d_t p4d = READ_ONCE(*p4dp);

        if (mm_pud_folded(mm))
                return;

        pr_debug("Validating P4D clear\n");
        p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
        WRITE_ONCE(*p4dp, p4d);
        p4d_clear(p4dp);
        p4d = READ_ONCE(*p4dp);
        WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
                                      pud_t *pudp)
{
        p4d_t p4d;

        if (mm_pud_folded(mm))
                return;

        pr_debug("Validating P4D populate\n");
        /*
         * This entry points to the next level page table page.
         * Hence this must not qualify as p4d_bad().
         */
        pud_clear(pudp);
        p4d_clear(p4dp);
        p4d_populate(mm, p4dp, pudp);
        p4d = READ_ONCE(*p4dp);
        WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
        pgd_t pgd = READ_ONCE(*pgdp);

        if (mm_p4d_folded(mm))
                return;

        pr_debug("Validating PGD clear\n");
        pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
        WRITE_ONCE(*pgdp, pgd);
        pgd_clear(pgdp);
        pgd = READ_ONCE(*pgdp);
        WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
                                      p4d_t *p4dp)
{
        pgd_t pgd;

        if (mm_p4d_folded(mm))
                return;

        pr_debug("Validating PGD populate\n");
        /*
         * This entry points to the next level page table page.
         * Hence this must not qualify as pgd_bad().
         */
        p4d_clear(p4dp);
        pgd_clear(pgdp);
        pgd_populate(mm, pgdp, p4dp);
        pgd = READ_ONCE(*pgdp);
        WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
                                      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
                                      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
                                   unsigned long pfn, unsigned long vaddr,
                                   pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
        pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
        set_pte_at(mm, vaddr, ptep, pte);
        barrier();
        pte_clear(mm, vaddr, ptep);
        pte = ptep_get(ptep);
        WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
        pmd_t pmd = READ_ONCE(*pmdp);

        pr_debug("Validating PMD clear\n");
        pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
        WRITE_ONCE(*pmdp, pmd);
        pmd_clear(pmdp);
        pmd = READ_ONCE(*pmdp);
        WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
                                      pgtable_t pgtable)
{
        pmd_t pmd;

        pr_debug("Validating PMD populate\n");
        /*
         * This entry points to the next level page table page.
         * Hence this must not qualify as pmd_bad().
         */
        pmd_populate(mm, pmdp, pgtable);
        pmd = READ_ONCE(*pmdp);
        WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
                return;

        pr_debug("Validating PTE special\n");
        WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
                return;

        pr_debug("Validating PTE protnone\n");
        WARN_ON(!pte_protnone(pte));
        WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));

        if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
                return;

        pr_debug("Validating PMD protnone\n");
        WARN_ON(!pmd_protnone(pmd));
        WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        pr_debug("Validating PTE devmap\n");
        WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd = pfn_pmd(pfn, prot);

        pr_debug("Validating PMD devmap\n");
        WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
        pud_t pud = pfn_pud(pfn, prot);

        pr_debug("Validating PUD devmap\n");
        WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
                return;

        pr_debug("Validating PTE soft dirty\n");
        WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
        WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
        pte_t pte = pfn_pte(pfn, prot);

        if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
                return;

        pr_debug("Validating PTE swap soft dirty\n");
        WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
        WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd = pfn_pmd(pfn, prot);

        if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
                return;

        pr_debug("Validating PMD soft dirty\n");
        WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
        WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd = pfn_pmd(pfn, prot);

        if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
                !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
                return;

        pr_debug("Validating PMD swap soft dirty\n");
        WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
        WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
        swp_entry_t swp;
        pte_t pte;

        pr_debug("Validating PTE swap\n");
        pte = pfn_pte(pfn, prot);
        swp = __pte_to_swp_entry(pte);
        pte = __swp_entry_to_pte(swp);
        WARN_ON(pfn != pte_pfn(pte));
}
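
/*
 * Note: __pte_to_swp_entry() and __swp_entry_to_pte() are raw bit level
 * conversions between pte_t and swp_entry_t; a round trip through both
 * is expected to be lossless, which is why the pfn must survive
 * unchanged in the test above.
 */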

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
        swp_entry_t swp;
        pmd_t pmd;

        pr_debug("Validating PMD swap\n");
        pmd = pfn_pmd(pfn, prot);
        swp = __pmd_to_swp_entry(pmd);
        pmd = __swp_entry_to_pmd(swp);
        WARN_ON(pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
        struct page *page;
        swp_entry_t swp;

        if (!IS_ENABLED(CONFIG_MIGRATION))
                return;

        pr_debug("Validating swap migration\n");
        /*
         * swap_migration_tests() requires a dedicated page as it needs
         * to be locked before creating a migration entry from it. Locking
         * the page that actually maps kernel text ('start_kernel') can be
         * really problematic. Let's allocate a dedicated page explicitly
         * for this purpose that will be freed subsequently.
         */
        page = alloc_page(GFP_KERNEL);
        if (!page) {
                pr_err("page allocation failed\n");
                return;
        }

        /*
         * make_migration_entry() expects the given page to be
         * locked, otherwise it stumbles upon a BUG_ON().
         */
        __SetPageLocked(page);
        swp = make_migration_entry(page, 1);
        WARN_ON(!is_migration_entry(swp));
        WARN_ON(!is_write_migration_entry(swp));

        make_migration_entry_read(&swp);
        WARN_ON(!is_migration_entry(swp));
        WARN_ON(is_write_migration_entry(swp));

        swp = make_migration_entry(page, 0);
        WARN_ON(!is_migration_entry(swp));
        WARN_ON(is_write_migration_entry(swp));
        __ClearPageLocked(page);
        __free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
        struct page *page;
        pte_t pte;

        pr_debug("Validating HugeTLB basic\n");
        /*
         * Accessing the page associated with the pfn is safe here,
         * as it was previously derived from a real kernel symbol.
         */
        page = pfn_to_page(pfn);
        pte = mk_huge_pte(page, prot);

        WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
        WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
        WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
        pte = pfn_pte(pfn, prot);

        WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
        pmd_t pmd;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD based THP\n");
        /*
         * pmd_trans_huge() and pmd_present() must return true after
         * MMU invalidation with pmd_mkinvalid(). This behavior is an
         * optimization for transparent huge pages. pmd_trans_huge()
         * must be true if pmd_page() returns a valid THP, to avoid
         * taking the pmd_lock when others walk over non-transhuge pmds
         * (i.e. when there are no THPs allocated). Especially when
         * splitting a THP and removing the present bit from the pmd,
         * pmd_trans_huge() still needs to return true. pmd_present()
         * should be true whenever pmd_trans_huge() returns true.
         */
        pmd = pfn_pmd(pfn, prot);
        WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
        WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
        WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
        pud_t pud;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PUD based THP\n");
        pud = pfn_pud(pfn, prot);
        WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

        /*
         * pud_mkinvalid() has been dropped for now. Enable back
         * these tests when it comes back with a modified pud_present().
         *
         * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
         * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
         */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
        unsigned long random_vaddr, random_pages, total_user_pages;

        total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

        random_pages = get_random_long() % total_user_pages;
        random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

        return random_vaddr;
}
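
/*
 * For illustration: on x86_64 with 4-level paging, FIRST_USER_ADDRESS is
 * 0 and TASK_SIZE is just below 128 TiB, so the function above returns a
 * page aligned address somewhere in the lower half of the canonical
 * address space. The exact bounds are architecture specific.
 */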
886
static int __init debug_vm_pgtable(void)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        pgd_t *pgdp;
        p4d_t *p4dp, *saved_p4dp;
        pud_t *pudp, *saved_pudp;
        pmd_t *pmdp, *saved_pmdp, pmd;
        pte_t *ptep;
        pgtable_t saved_ptep;
        pgprot_t prot, protnone;
        phys_addr_t paddr;
        unsigned long vaddr, pte_aligned, pmd_aligned;
        unsigned long pud_aligned, p4d_aligned, pgd_aligned;
        spinlock_t *ptl = NULL;

        pr_info("Validating architecture page table helpers\n");
        prot = vm_get_page_prot(VMFLAGS);
        vaddr = get_random_vaddr();
        mm = mm_alloc();
        if (!mm) {
                pr_err("mm_struct allocation failed\n");
                return 1;
        }

        /*
         * __P000 (or even __S000) will help create page table entries with
         * PROT_NONE permission as required for pxx_protnone_tests().
         */
        protnone = __P000;

        vma = vm_area_alloc(mm);
        if (!vma) {
                pr_err("vma allocation failed\n");
                return 1;
        }

        /*
         * The PFN for mapping at PTE level is determined from a standard
         * kernel text symbol. But pfns for higher page table levels are
         * derived by masking lower bits of this real pfn. These derived
         * pfns might not exist on the platform but that does not really
         * matter as pfn_pxx() helpers will still create appropriate
         * entries for the test. This helps avoid allocating large memory
         * blocks for mappings at higher page table levels.
         */
        paddr = __pa_symbol(&start_kernel);

        pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
        pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
        pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
        p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
        pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
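
        /*
         * Worked example, assuming x86_64 defaults (PAGE_SHIFT == 12,
         * PMD_SHIFT == 21, PUD_SHIFT == 30): for paddr == 0x40345678,
         * pte_aligned is 0x40345, pmd_aligned is 0x40200 and pud_aligned
         * is 0x40000, i.e. the same physical address with progressively
         * more low bits masked off.
         */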
        WARN_ON(!pfn_valid(pte_aligned));

        pgdp = pgd_offset(mm, vaddr);
        p4dp = p4d_alloc(mm, pgdp, vaddr);
        pudp = pud_alloc(mm, p4dp, vaddr);
        pmdp = pmd_alloc(mm, pudp, vaddr);
        /*
         * Allocate pgtable_t
         */
        if (pte_alloc(mm, pmdp)) {
                pr_err("pgtable allocation failed\n");
                return 1;
        }

        /*
         * Save all the page table page addresses as the page table
         * entries will be used for testing with random or garbage
         * values. These saved addresses will be used for freeing
         * page table pages.
         */
        pmd = READ_ONCE(*pmdp);
        saved_p4dp = p4d_offset(pgdp, 0UL);
        saved_pudp = pud_offset(p4dp, 0UL);
        saved_pmdp = pmd_offset(pudp, 0UL);
        saved_ptep = pmd_pgtable(pmd);

        pte_basic_tests(pte_aligned, prot);
        pmd_basic_tests(pmd_aligned, prot);
        pud_basic_tests(mm, pud_aligned, prot);
        p4d_basic_tests(p4d_aligned, prot);
        pgd_basic_tests(pgd_aligned, prot);

        pmd_leaf_tests(pmd_aligned, prot);
        pud_leaf_tests(pud_aligned, prot);

        pte_savedwrite_tests(pte_aligned, protnone);
        pmd_savedwrite_tests(pmd_aligned, protnone);

        pte_special_tests(pte_aligned, prot);
        pte_protnone_tests(pte_aligned, protnone);
        pmd_protnone_tests(pmd_aligned, protnone);

        pte_devmap_tests(pte_aligned, prot);
        pmd_devmap_tests(pmd_aligned, prot);
        pud_devmap_tests(pud_aligned, prot);

        pte_soft_dirty_tests(pte_aligned, prot);
        pmd_soft_dirty_tests(pmd_aligned, prot);
        pte_swap_soft_dirty_tests(pte_aligned, prot);
        pmd_swap_soft_dirty_tests(pmd_aligned, prot);

        pte_swap_tests(pte_aligned, prot);
        pmd_swap_tests(pmd_aligned, prot);

        swap_migration_tests();

        pmd_thp_tests(pmd_aligned, prot);
        pud_thp_tests(pud_aligned, prot);

        hugetlb_basic_tests(pte_aligned, prot);

        /*
         * Page table modifying tests. They need to hold the
         * proper page table lock.
         */

        ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
        pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
        pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
        pte_unmap_unlock(ptep, ptl);

        ptl = pmd_lock(mm, pmdp);
        pmd_clear_tests(mm, pmdp);
        pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
        pmd_huge_tests(pmdp, pmd_aligned, prot);
        pmd_populate_tests(mm, pmdp, saved_ptep);
        spin_unlock(ptl);

        ptl = pud_lock(mm, pudp);
        pud_clear_tests(mm, pudp);
        pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
        pud_huge_tests(pudp, pud_aligned, prot);
        pud_populate_tests(mm, pudp, saved_pmdp);
        spin_unlock(ptl);

        spin_lock(&mm->page_table_lock);
        p4d_clear_tests(mm, p4dp);
        pgd_clear_tests(mm, pgdp);
        p4d_populate_tests(mm, p4dp, saved_pudp);
        pgd_populate_tests(mm, pgdp, saved_p4dp);
        spin_unlock(&mm->page_table_lock);

        p4d_free(mm, saved_p4dp);
        pud_free(mm, saved_pudp);
        pmd_free(mm, saved_pmdp);
        pte_free(mm, saved_ptep);

        vm_area_free(vma);
        mm_dec_nr_puds(mm);
        mm_dec_nr_pmds(mm);
        mm_dec_nr_ptes(mm);
        mmdrop(mm);
        return 0;
}
late_initcall(debug_vm_pgtable);