x86/mm: Do not auto-massage page protections
author     Dave Hansen <dave.hansen@linux.intel.com>
           Fri, 6 Apr 2018 20:55:09 +0000 (13:55 -0700)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 12 Apr 2018 07:04:22 +0000 (09:04 +0200)
A PTE is constructed from a physical address and a pgprotval_t.
PAGE_KERNEL, for instance, is a pgprot_t and must be converted
into a pgprotval_t before it can be used to create a PTE.  This is
done implicitly within functions like pfn_pte() by massage_pgprot().
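
For reference, the conversion helper is essentially the following (a
sketch of the existing pgtable.h code; only its tail is visible in
the diff below):

  static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
  {
          pgprotval_t protval = pgprot_val(pgprot);

          /* Unsupported bits are silently dropped here: */
          if (protval & _PAGE_PRESENT)
                  protval &= __supported_pte_mask;

          return protval;
  }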

However, this makes it very challenging to set bits (and keep them
set) if your bit is being filtered out by massage_pgprot().

This moves the bit filtering out of pfn_pte() and friends.  For
users of PAGE_KERNEL*, filtering is still done automatically inside
those macros, but users of __PAGE_KERNEL* now need to do their own
filtering.
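
For example, a __PAGE_KERNEL* user now filters explicitly before
handing the value to pfn_pte() (this is the pattern used in the
map_ldt_struct() hunk below):

  pgprot_t pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);

  /* Filter out unsupported __PAGE_KERNEL* bits: */
  pgprot_val(pte_prot) &= __supported_pte_mask;
  pte = pfn_pte(pfn, pte_prot);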

Note that we also move pfn_pte/pmd/pud() over to check_pgprot()
instead of massage_pgprot().  This way, we still *look* for
unsupported bits and properly warn about them if we find them.  This
might happen if an unfiltered __PAGE_KERNEL* value was passed in,
for instance.
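
Concretely, with CONFIG_DEBUG_VM=y, a (hypothetical) caller passing
in a raw value that carries a bit missing from __supported_pte_mask:

  pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO));

still gets the bit cleared, but now with a one-time "attempted to
set unsupported pgprot" warning instead of a silent fixup.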

- printk format warning fix from: Arnd Bergmann <arnd@arndb.de>
- boot crash fix from:            Tom Lendacky <thomas.lendacky@amd.com>
- crash bisected by:              Mike Galbraith <efault@gmx.de>

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reported-and-fixed-by: Arnd Bergmann <arnd@arndb.de>
Fixed-by: Tom Lendacky <thomas.lendacky@amd.com>
Bisected-by: Mike Galbraith <efault@gmx.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180406205509.77E1D7F6@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/boot/compressed/kaslr.c
arch/x86/include/asm/pgtable.h
arch/x86/kernel/head64.c
arch/x86/kernel/ldt.c
arch/x86/mm/ident_map.c
arch/x86/mm/iomap_32.c
arch/x86/mm/ioremap.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/pgtable.c
arch/x86/power/hibernate_64.c

diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 66e42a0..a0a50b9 100644
@@ -54,6 +54,9 @@ unsigned int ptrs_per_p4d __ro_after_init = 1;
 
 extern unsigned long get_cmd_line_ptr(void);
 
+/* Used by PAGE_KERN* macros: */
+pteval_t __default_kernel_pte_mask __read_mostly = ~0;
+
 /* Simplified build-specific string for starting entropy. */
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
                LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 89d5c88..5f49b4f 100644
@@ -526,22 +526,39 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
        return protval;
 }
 
+static inline pgprotval_t check_pgprot(pgprot_t pgprot)
+{
+       pgprotval_t massaged_val = massage_pgprot(pgprot);
+
+       /* mmdebug.h can not be included here because of dependencies */
+#ifdef CONFIG_DEBUG_VM
+       WARN_ONCE(pgprot_val(pgprot) != massaged_val,
+                 "attempted to set unsupported pgprot: %016llx "
+                 "bits: %016llx supported: %016llx\n",
+                 (u64)pgprot_val(pgprot),
+                 (u64)pgprot_val(pgprot) ^ massaged_val,
+                 (u64)__supported_pte_mask);
+#endif
+
+       return massaged_val;
+}
+
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
-                    massage_pgprot(pgprot));
+                    check_pgprot(pgprot));
 }
 
 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 {
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
-                    massage_pgprot(pgprot));
+                    check_pgprot(pgprot));
 }
 
 static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
 {
        return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
-                    massage_pgprot(pgprot));
+                    check_pgprot(pgprot));
 }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -553,7 +570,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
-       val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
+       val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
 
        return __pte(val);
 }
@@ -563,7 +580,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
        pmdval_t val = pmd_val(pmd);
 
        val &= _HPAGE_CHG_MASK;
-       val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+       val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
 
        return __pmd(val);
 }
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 0c855de..0c408f8 100644
@@ -195,6 +195,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
        pud[i + 1] = (pudval_t)pmd + pgtable_flags;
 
        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+       /* Filter out unsupported __PAGE_KERNEL_* bits: */
+       pmd_entry &= *(pmdval_t *)fixup_pointer(&__supported_pte_mask, physaddr);
        pmd_entry += sme_get_me_mask();
        pmd_entry +=  physaddr;
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 26d713e..d41d896 100644
@@ -145,6 +145,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
                unsigned long offset = i << PAGE_SHIFT;
                const void *src = (char *)ldt->entries + offset;
                unsigned long pfn;
+               pgprot_t pte_prot;
                pte_t pte, *ptep;
 
                va = (unsigned long)ldt_slot_va(slot) + offset;
@@ -163,7 +164,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
                 * target via some kernel interface which misses a
                 * permission check.
                 */
-               pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
+               pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
+               /* Filter out unsupported __PAGE_KERNEL* bits: */
+               pgprot_val(pte_prot) &= __supported_pte_mask;
+               pte = pfn_pte(pfn, pte_prot);
                set_pte_at(mm, va, ptep, pte);
                pte_unmap_unlock(ptep, ptl);
        }
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 9aa22be..a2f0c7e 100644
@@ -98,6 +98,9 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
        if (!info->kernpg_flag)
                info->kernpg_flag = _KERNPG_TABLE;
 
+       /* Filter out unsupported __PAGE_KERNEL_* bits: */
+       info->kernpg_flag &= __default_kernel_pte_mask;
+
        for (; addr < end; addr = next) {
                pgd_t *pgd = pgd_page + pgd_index(addr);
                p4d_t *p4d;
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ada98b3..b3294d3 100644
@@ -44,6 +44,9 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
                return ret;
 
        *prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
+       /* Filter out unsupported __PAGE_KERNEL* bits: */
+       pgprot_val(*prot) &= __default_kernel_pte_mask;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
@@ -88,6 +91,9 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
                prot = __pgprot(__PAGE_KERNEL |
                                cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
 
+       /* Filter out unsupported __PAGE_KERNEL* bits: */
+       pgprot_val(prot) &= __default_kernel_pte_mask;
+
        return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index e2db83b..c63a545 100644
@@ -816,6 +816,9 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
        }
        pte = early_ioremap_pte(addr);
 
+       /* Sanitize 'flags' against any unsupported bits: */
+       pgprot_val(flags) &= __default_kernel_pte_mask;
+
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index d8ff013..980dbeb 100644
@@ -269,6 +269,12 @@ void __init kasan_early_init(void)
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
        p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
 
+       /* Mask out unsupported __PAGE_KERNEL bits: */
+       pte_val &= __default_kernel_pte_mask;
+       pmd_val &= __default_kernel_pte_mask;
+       pud_val &= __default_kernel_pte_mask;
+       p4d_val &= __default_kernel_pte_mask;
+
        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);
 
@@ -371,7 +377,13 @@ void __init kasan_init(void)
         */
        memset(kasan_zero_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
-               pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
+               pte_t pte;
+               pgprot_t prot;
+
+               prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
+               pgprot_val(prot) &= __default_kernel_pte_mask;
+
+               pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
                set_pte(&kasan_zero_pte[i], pte);
        }
        /* Flush TLBs again to be sure that write protection applied. */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 34cda7e..d10a40a 100644
@@ -583,6 +583,9 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
 void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
                       pgprot_t flags)
 {
+       /* Sanitize 'flags' against any unsupported bits: */
+       pgprot_val(flags) &= __default_kernel_pte_mask;
+
        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
 }
 
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 74a5329..48b14b5 100644
@@ -51,6 +51,12 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
        pmd_t *pmd;
        pud_t *pud;
        p4d_t *p4d = NULL;
+       pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
+       pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);
+
+       /* Filter out unsupported __PAGE_KERNEL* bits: */
+       pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
+       pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;
 
        /*
         * The new mapping only has to cover the page containing the image
@@ -81,15 +87,19 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
                return -ENOMEM;
 
        set_pmd(pmd + pmd_index(restore_jump_address),
-               __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+               __pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
        set_pud(pud + pud_index(restore_jump_address),
-               __pud(__pa(pmd) | _KERNPG_TABLE));
+               __pud(__pa(pmd) | pgprot_val(pgtable_prot)));
        if (p4d) {
-               set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE));
-               set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE));
+               p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
+               pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
+
+               set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
+               set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
        } else {
                /* No p4d for 4-level paging: point the pgd to the pud page table */
-               set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(pud) | _KERNPG_TABLE));
+               pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
+               set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
        }
 
        return 0;