x86/mm/64: Tighten up vmalloc_fault() sanity checks on 5-level kernels
author Andy Lutomirski <luto@kernel.org>
Thu, 25 Jan 2018 21:12:15 +0000 (13:12 -0800)
committer Thomas Gleixner <tglx@linutronix.de>
Fri, 26 Jan 2018 14:56:23 +0000 (15:56 +0100)
On a 5-level kernel, if a non-init mm has a top-level entry, it needs to
match init_mm's, but the vmalloc_fault() code skipped over the BUG_ON()
that would have checked it.

While we're at it, get rid of the rather confusing 4-level folded "pgd"
logic.

Fixes: b50858ce3e2a ("x86/mm/vmalloc: Add 5-level paging support")
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Neil Berrington <neil.berrington@datacore.com>
Link: https://lkml.kernel.org/r/2ae598f8c279b0a29baf75df207e6f2fdddc0a1b.1516914529.git.luto@kernel.org
arch/x86/mm/fault.c

index b3e4077..800de81 100644 (file)
@@ -439,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
        if (pgd_none(*pgd_ref))
                return -1;
 
-       if (pgd_none(*pgd)) {
-               set_pgd(pgd, *pgd_ref);
-               arch_flush_lazy_mmu_mode();
-       } else if (CONFIG_PGTABLE_LEVELS > 4) {
-               /*
-                * With folded p4d, pgd_none() is always false, so the pgd may
-                * point to an empty page table entry and pgd_page_vaddr()
-                * will return garbage.
-                *
-                * We will do the correct sanity check on the p4d level.
-                */
-               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+       if (CONFIG_PGTABLE_LEVELS > 4) {
+               if (pgd_none(*pgd)) {
+                       set_pgd(pgd, *pgd_ref);
+                       arch_flush_lazy_mmu_mode();
+               } else {
+                       BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+               }
        }
 
        /* With 4-level paging, copying happens on the p4d level. */
@@ -459,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
        if (p4d_none(*p4d_ref))
                return -1;
 
-       if (p4d_none(*p4d)) {
+       if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
                set_p4d(p4d, *p4d_ref);
                arch_flush_lazy_mmu_mode();
        } else {
@@ -470,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
         * Below here mismatches are bugs because these lower tables
         * are shared:
         */
+       BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 
        pud = pud_offset(p4d, address);
        pud_ref = pud_offset(p4d_ref, address);