riscv: Enable KFENCE for riscv64
author: Liu Shixin <liushixin2@huawei.com>
Tue, 15 Jun 2021 03:07:34 +0000 (11:07 +0800)
committer: Palmer Dabbelt <palmerdabbelt@google.com>
Thu, 1 Jul 2021 03:55:41 +0000 (20:55 -0700)
Add architecture specific implementation details for KFENCE and enable
KFENCE for the riscv64 architecture. In particular, this implements the
required interface in <asm/kfence.h>.

KFENCE requires that attributes for pages from its memory pool can
individually be set. Therefore, force the kfence pool to be mapped at
page granularity.

This patch was tested using the testcases in kfence_test.c, and all tests
passed.

Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Acked-by: Marco Elver <elver@google.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
arch/riscv/Kconfig
arch/riscv/include/asm/kfence.h [new file with mode: 0644]
arch/riscv/mm/fault.c

index a71b1d2..9ff3633 100644 (file)
@@ -64,6 +64,7 @@ config RISCV
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN if MMU && 64BIT
        select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
+       select HAVE_ARCH_KFENCE if MMU && 64BIT
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_KGDB_QXFER_PKT
        select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
new file mode 100644 (file)
index 0000000..d887a54
--- /dev/null
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_KFENCE_H
+#define _ASM_RISCV_KFENCE_H
+
+#include <linux/kfence.h>
+#include <linux/pfn.h>
+#include <asm-generic/pgalloc.h>
+#include <asm/pgtable.h>
+
+static inline int split_pmd_page(unsigned long addr)
+{
+       int i;
+       unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
+       pmd_t *pmd = pmd_off_k(addr);
+       pte_t *pte = pte_alloc_one_kernel(&init_mm);
+
+       if (!pte)
+               return -ENOMEM;
+
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
+       set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
+
+       flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+       return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+       int ret;
+       unsigned long addr;
+       pmd_t *pmd;
+
+       for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+            addr += PAGE_SIZE) {
+               pmd = pmd_off_k(addr);
+
+               if (pmd_leaf(*pmd)) {
+                       ret = split_pmd_page(addr);
+                       if (ret)
+                               return false;
+               }
+       }
+
+       return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+       pte_t *pte = virt_to_kpte(addr);
+
+       if (protect)
+               set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+       else
+               set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+       return true;
+}
+
+#endif /* _ASM_RISCV_KFENCE_H */
index 096463c..aa08dd2 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/signal.h>
 #include <linux/uaccess.h>
 #include <linux/kprobes.h>
+#include <linux/kfence.h>
 
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
@@ -45,7 +46,15 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
-       msg = (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request";
+       if (addr < PAGE_SIZE)
+               msg = "NULL pointer dereference";
+       else {
+               if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
+                       return;
+
+               msg = "paging request";
+       }
+
        die_kernel_fault(msg, addr, regs);
 }