Merge branch 'for-next/kernel-ptrauth' into for-next/core
author Catalin Marinas <catalin.marinas@arm.com>
Wed, 25 Mar 2020 11:11:08 +0000 (11:11 +0000)
committer Catalin Marinas <catalin.marinas@arm.com>
Wed, 25 Mar 2020 11:11:08 +0000 (11:11 +0000)
* for-next/kernel-ptrauth:
  : Return address signing - in-kernel support
  arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH
  lkdtm: arm64: test kernel pointer authentication
  arm64: compile the kernel with ptrauth return address signing
  kconfig: Add support for 'as-option'
  arm64: suspend: restore the kernel ptrauth keys
  arm64: __show_regs: strip PAC from lr in printk
  arm64: unwind: strip PAC from kernel addresses
  arm64: mask PAC bits of __builtin_return_address
  arm64: initialize ptrauth keys for kernel booting task
  arm64: initialize and switch ptrauth kernel keys
  arm64: enable ptrauth earlier
  arm64: cpufeature: handle conflicts based on capability
  arm64: cpufeature: Move cpu capability helpers inside C file
  arm64: ptrauth: Add bootup/runtime flags for __cpu_setup
  arm64: install user ptrauth keys at kernel exit time
  arm64: rename ptrauth key structures to be user-specific
  arm64: cpufeature: add pointer auth meta-capabilities
  arm64: cpufeature: Fix meta-capability cpufeature check

26 files changed:
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/include/asm/asm_pointer_auth.h [new file with mode: 0644]
arch/arm64/include/asm/compiler.h [new file with mode: 0644]
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/pointer_auth.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/stackprotector.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/pointer_auth.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/sleep.S
arch/arm64/kernel/smp.c
arch/arm64/kernel/stacktrace.c
arch/arm64/mm/proc.S
drivers/misc/lkdtm/bugs.c
drivers/misc/lkdtm/core.c
drivers/misc/lkdtm/lkdtm.h
include/linux/stackprotector.h
scripts/Kconfig.include

index 8889ce7..e6712b6 100644
@@ -118,6 +118,7 @@ config ARM64
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
+       select HAVE_ARCH_COMPILER_H
        select HAVE_ARCH_HUGE_VMAP
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
@@ -1501,6 +1502,9 @@ config ARM64_PTR_AUTH
        bool "Enable support for pointer authentication"
        default y
        depends on !KVM || ARM64_VHE
+       depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+       depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
+       depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
        help
          Pointer authentication (part of the ARMv8.3 Extensions) provides
          instructions for signing and authenticating pointers against secret
@@ -1508,16 +1512,45 @@ config ARM64_PTR_AUTH
          and other attacks.
 
          This option enables these instructions at EL0 (i.e. for userspace).
-
          Choosing this option will cause the kernel to initialise secret keys
          for each process at exec() time, with these keys being
          context-switched along with the process.
 
+         If the compiler supports the -mbranch-protection or
+         -msign-return-address flag (e.g. GCC 7 or later), then this option
+         will also cause the kernel itself to be compiled with return address
+         protection. In this case, and if the target hardware is known to
+         support pointer authentication, then CONFIG_STACKPROTECTOR can be
+         disabled with minimal loss of protection.
+
          The feature is detected at runtime. If the feature is not present in
          hardware it will not be advertised to userspace/KVM guests, nor will
          it be enabled. However, KVM guests also require VHE mode and hence
          the CONFIG_ARM64_VHE=y option to use this feature.
 
+         If the feature is present on the boot CPU but not on a late CPU, then
+         the late CPU will be parked. Also, if the boot CPU does not have
+         address auth and a late CPU does, then the late CPU will still boot,
+         but with the feature disabled. On such a system, this option should
+         not be selected.
+
+         This feature works with the FUNCTION_GRAPH_TRACER option only if
+         DYNAMIC_FTRACE_WITH_REGS is enabled.
+
+config CC_HAS_BRANCH_PROT_PAC_RET
+       # GCC 9 or later, clang 8 or later
+       def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
+
+config CC_HAS_SIGN_RETURN_ADDRESS
+       # GCC 7, 8
+       def_bool $(cc-option,-msign-return-address=all)
+
+config AS_HAS_PAC
+       def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)
+
+config AS_HAS_CFI_NEGATE_RA_STATE
+       def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
+
 endmenu
 
 menu "ARMv8.4 architectural features"
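
For background on the return address protection described in the help text
above: the compiler signs LR in each function prologue and authenticates it in
the epilogue, so an overwritten return address fails authentication. A minimal
sketch, not part of this patch; the instructions in the comments are the
approximate codegen expected from a compiler invoked with
-mbranch-protection=pac-ret+leaf:

    /* Compile with: gcc -O2 -mbranch-protection=pac-ret+leaf -c demo.c
     * paciasp/autiasp are HINT-space instructions, so the same binary
     * still runs (without protection) on pre-ARMv8.3 CPUs. */
    unsigned long pac_ret_demo(unsigned long x)
    {
            /* prologue: paciasp -- sign LR against SP with the IA key */
            unsigned long y = 2 * x + 1;
            /* epilogue: autiasp -- authenticate LR; a corrupted LR is
             * left poisoned and faults at the following ret */
            return y;
    }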
index dca1a97..f15f92b 100644
@@ -65,6 +65,17 @@ stack_protector_prepare: prepare0
                                        include/generated/asm-offsets.h))
 endif
 
+ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
+branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
+# -march=armv8.3-a enables the non-NOP PAC instructions. To avoid the compiler
+# generating them, and consequently breaking the single image contract, we pass
+# the flag only to the assembler. It is only needed for non-integrated
+# assemblers.
+branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+KBUILD_CFLAGS += $(branch-prot-flags-y)
+endif
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS        += -mbig-endian
 CHECKFLAGS     += -D__AARCH64EB__
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
new file mode 100644
index 0000000..ce2a848
--- /dev/null
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ASM_POINTER_AUTH_H
+#define __ASM_ASM_POINTER_AUTH_H
+
+#include <asm/alternative.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * The offset of thread.keys_user.ap* exceeds the #imm offset range of
+ * ldp, so compute the address of thread.keys_user as the base and use
+ * the individual thread.keys_user.ap* fields as offsets from it.
+ */
+       .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+       mov     \tmp1, #THREAD_KEYS_USER
+       add     \tmp1, \tsk, \tmp1
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+       b       .Laddr_auth_skip_\@
+alternative_else_nop_endif
+       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
+       msr_s   SYS_APIAKEYLO_EL1, \tmp2
+       msr_s   SYS_APIAKEYHI_EL1, \tmp3
+       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
+       msr_s   SYS_APIBKEYLO_EL1, \tmp2
+       msr_s   SYS_APIBKEYHI_EL1, \tmp3
+       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
+       msr_s   SYS_APDAKEYLO_EL1, \tmp2
+       msr_s   SYS_APDAKEYHI_EL1, \tmp3
+       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
+       msr_s   SYS_APDBKEYLO_EL1, \tmp2
+       msr_s   SYS_APDBKEYHI_EL1, \tmp3
+.Laddr_auth_skip_\@:
+alternative_if ARM64_HAS_GENERIC_AUTH
+       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
+       msr_s   SYS_APGAKEYLO_EL1, \tmp2
+       msr_s   SYS_APGAKEYHI_EL1, \tmp3
+alternative_else_nop_endif
+       .endm
+
+       .macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
+alternative_if ARM64_HAS_ADDRESS_AUTH
+       mov     \tmp1, #THREAD_KEYS_KERNEL
+       add     \tmp1, \tsk, \tmp1
+       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA]
+       msr_s   SYS_APIAKEYLO_EL1, \tmp2
+       msr_s   SYS_APIAKEYHI_EL1, \tmp3
+       .if     \sync == 1
+       isb
+       .endif
+alternative_else_nop_endif
+       .endm
+
+#else /* CONFIG_ARM64_PTR_AUTH */
+
+       .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+       .endm
+
+       .macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
+       .endm
+
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#endif /* __ASM_ASM_POINTER_AUTH_H */
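
On the #imm range mentioned in the comment above: ldp/stp of 64-bit registers
encode a 7-bit signed immediate scaled by 8, so only offsets in [-512, 504]
are reachable directly, and THREAD_KEYS_USER lies deeper inside task_struct
than that. A standalone C sketch of the constraint (the actual offsets are
config-dependent):

    /* True if an offset is encodable as an ldp/stp imm7 (scaled by 8). */
    static int fits_ldp_imm7(long offset)
    {
            return offset >= -512 && offset <= 504 && (offset % 8) == 0;
    }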
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
new file mode 100644
index 0000000..eece20d
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_COMPILER_H
+#define __ASM_COMPILER_H
+
+#if defined(CONFIG_ARM64_PTR_AUTH)
+
+/*
+ * The EL0/EL1 pointer bits used by a pointer authentication code.
+ * This depends on TBI0/TBI1 being enabled; otherwise bits 63:56 would also apply.
+ */
+#define ptrauth_user_pac_mask()                GENMASK_ULL(54, vabits_actual)
+#define ptrauth_kernel_pac_mask()      GENMASK_ULL(63, vabits_actual)
+
+/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
+#define ptrauth_clear_pac(ptr)                                         \
+       ((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) :      \
+                              (ptr & ~ptrauth_user_pac_mask()))
+
+#define __builtin_return_address(val)                                  \
+       (void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#endif /* __ASM_COMPILER_H */
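
As a worked example of the masks above, a self-contained sketch (not kernel
code: GENMASK_ULL/BIT_ULL are re-defined locally and vabits_actual is assumed
to be 48):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)              (1ULL << (n))
    #define GENMASK_ULL(h, l)       ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    #define vabits_actual           48      /* assumed VA size for the demo */

    #define user_pac_mask()         GENMASK_ULL(54, vabits_actual)
    #define kernel_pac_mask()       GENMASK_ULL(63, vabits_actual)

    /* Bit 55 selects the TTBR1 (kernel) or TTBR0 (user) form; the PAC
     * bits are restored to all-ones or all-zeroes respectively. */
    static uint64_t clear_pac(uint64_t ptr)
    {
            return (ptr & BIT_ULL(55)) ? (ptr | kernel_pac_mask())
                                       : (ptr & ~user_pac_mask());
    }

    int main(void)
    {
            /* kernel pointer, PAC in bits 63:48 -> ffffffff12345678 */
            printf("%016llx\n",
                   (unsigned long long)clear_pac(0xa5deffff12345678ULL));
            /* user pointer, PAC in bits 54:48 -> 0000000012345678 */
            printf("%016llx\n",
                   (unsigned long long)clear_pac(0x002a000012345678ULL));
            return 0;
    }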
index 185e44a..8eb5a08 100644
@@ -59,7 +59,9 @@
 #define ARM64_HAS_E0PD                         49
 #define ARM64_HAS_RNG                          50
 #define ARM64_HAS_AMU_EXTN                     51
+#define ARM64_HAS_ADDRESS_AUTH                 52
+#define ARM64_HAS_GENERIC_AUTH                 53
 
-#define ARM64_NCAPS                            52
+#define ARM64_NCAPS                            54
 
 #endif /* __ASM_CPUCAPS_H */
index e75f7df..09eb0da 100644
@@ -208,6 +208,10 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
  *     In some non-typical cases either both (a) and (b), or neither,
  *     should be permitted. This can be described by including neither
  *     or both flags in the capability's type field.
+ *
+ *     In case of a conflict, the CPU is prevented from booting. If the
+ *     ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
+ *     then a kernel panic is triggered.
  */
 
 
@@ -240,6 +244,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU    ((u16)BIT(4))
 /* Is it safe for a late CPU to miss this capability when system has it */
 #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU     ((u16)BIT(5))
+/* Panic when a conflict is detected */
+#define ARM64_CPUCAP_PANIC_ON_CONFLICT         ((u16)BIT(6))
 
 /*
  * CPU errata workarounds that need to be enabled at boot time if one or
@@ -279,9 +285,20 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 
 /*
  * CPU feature used early in the boot based on the boot CPU. All secondary
- * CPUs must match the state of the capability as detected by the boot CPU.
+ * CPUs must match the state of the capability as detected by the boot CPU. In
+ * case of a conflict, a kernel panic is triggered.
+ */
+#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE           \
+       (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
+
+/*
+ * CPU feature used early in the boot based on the boot CPU. It is safe for a
+ * late CPU to have this feature even though the boot CPU hasn't enabled it,
+ * although the feature will not be used by Linux in this case. If the boot CPU
+ * has enabled this feature already, then every late CPU must have it.
  */
-#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
+#define ARM64_CPUCAP_BOOT_CPU_FEATURE                  \
+       (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
 
 struct arm64_cpu_capabilities {
        const char *desc;
@@ -340,18 +357,6 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
        return cap->type & ARM64_CPUCAP_SCOPE_MASK;
 }
 
-static inline bool
-cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
-{
-       return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
-}
-
-static inline bool
-cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
-{
-       return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
-}
-
 /*
  * Generic helper for handling capabilities with multiple (match,enable) pairs
  * of call backs, sharing the same capability bit.
@@ -654,15 +659,13 @@ static inline bool system_supports_cnp(void)
 static inline bool system_supports_address_auth(void)
 {
        return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-               (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
-                cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
+               cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
 }
 
 static inline bool system_supports_generic_auth(void)
 {
        return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
-               (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
-                cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
+               cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
 static inline bool system_uses_irq_prio_masking(void)
index 7a24bad..70c4715 100644
@@ -22,7 +22,7 @@ struct ptrauth_key {
  * We give each process its own keys, which are shared by all threads. The keys
  * are inherited upon fork(), and reinitialised upon exec*().
  */
-struct ptrauth_keys {
+struct ptrauth_keys_user {
        struct ptrauth_key apia;
        struct ptrauth_key apib;
        struct ptrauth_key apda;
@@ -30,7 +30,11 @@ struct ptrauth_keys {
        struct ptrauth_key apga;
 };
 
-static inline void ptrauth_keys_init(struct ptrauth_keys *keys)
+struct ptrauth_keys_kernel {
+       struct ptrauth_key apia;
+};
+
+static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
 {
        if (system_supports_address_auth()) {
                get_random_bytes(&keys->apia, sizeof(keys->apia));
@@ -50,48 +54,38 @@ do {                                                                \
        write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1);     \
 } while (0)
 
-static inline void ptrauth_keys_switch(struct ptrauth_keys *keys)
+static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
 {
-       if (system_supports_address_auth()) {
-               __ptrauth_key_install(APIA, keys->apia);
-               __ptrauth_key_install(APIB, keys->apib);
-               __ptrauth_key_install(APDA, keys->apda);
-               __ptrauth_key_install(APDB, keys->apdb);
-       }
+       if (system_supports_address_auth())
+               get_random_bytes(&keys->apia, sizeof(keys->apia));
+}
 
-       if (system_supports_generic_auth())
-               __ptrauth_key_install(APGA, keys->apga);
+static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
+{
+       if (system_supports_address_auth())
+               __ptrauth_key_install(APIA, keys->apia);
 }
 
 extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
 
-/*
- * The EL0 pointer bits used by a pointer authentication code.
- * This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
- */
-#define ptrauth_user_pac_mask()        GENMASK(54, vabits_actual)
-
-/* Only valid for EL0 TTBR0 instruction pointers */
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 {
-       return ptr & ~ptrauth_user_pac_mask();
+       return ptrauth_clear_pac(ptr);
 }
 
 #define ptrauth_thread_init_user(tsk)                                  \
-do {                                                                   \
-       struct task_struct *__ptiu_tsk = (tsk);                         \
-       ptrauth_keys_init(&__ptiu_tsk->thread.keys_user);               \
-       ptrauth_keys_switch(&__ptiu_tsk->thread.keys_user);             \
-} while (0)
-
-#define ptrauth_thread_switch(tsk)     \
-       ptrauth_keys_switch(&(tsk)->thread.keys_user)
+       ptrauth_keys_init_user(&(tsk)->thread.keys_user)
+#define ptrauth_thread_init_kernel(tsk)                                        \
+       ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
+#define ptrauth_thread_switch_kernel(tsk)                              \
+       ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
 
 #else /* CONFIG_ARM64_PTR_AUTH */
 #define ptrauth_prctl_reset_keys(tsk, arg)     (-EINVAL)
 #define ptrauth_strip_insn_pac(lr)     (lr)
 #define ptrauth_thread_init_user(tsk)
-#define ptrauth_thread_switch(tsk)
+#define ptrauth_thread_init_kernel(tsk)
+#define ptrauth_thread_switch_kernel(tsk)
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
 #endif /* __ASM_POINTER_AUTH_H */
index 5ba6320..4c77da5 100644
@@ -146,7 +146,8 @@ struct thread_struct {
        unsigned long           fault_code;     /* ESR_EL1 value */
        struct debug_info       debug;          /* debugging */
 #ifdef CONFIG_ARM64_PTR_AUTH
-       struct ptrauth_keys     keys_user;
+       struct ptrauth_keys_user        keys_user;
+       struct ptrauth_keys_kernel      keys_kernel;
 #endif
 };
 
index a0c8a0b..40d5ba0 100644
 #define CPU_STUCK_REASON_52_BIT_VA     (UL(1) << CPU_STUCK_REASON_SHIFT)
 #define CPU_STUCK_REASON_NO_GRAN       (UL(2) << CPU_STUCK_REASON_SHIFT)
 
+/* Possible options for __cpu_setup */
+/* Option to set up the primary CPU */
+#define ARM64_CPU_BOOT_PRIMARY         (1)
+/* Option to set up secondary CPUs */
+#define ARM64_CPU_BOOT_SECONDARY       (2)
+/* Option to set up a CPU for runtime services (e.g. cpu_resume) */
+#define ARM64_CPU_RUNTIME              (3)
+
 #ifndef __ASSEMBLY__
 
 #include <asm/percpu.h>
@@ -30,6 +38,7 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
+#include <asm/pointer_auth.h>
 
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
 
@@ -87,6 +96,9 @@ asmlinkage void secondary_start_kernel(void);
 struct secondary_data {
        void *stack;
        struct task_struct *task;
+#ifdef CONFIG_ARM64_PTR_AUTH
+       struct ptrauth_keys_kernel ptrauth_key;
+#endif
        long status;
 };
 
index 5884a2b..7263e0b 100644
@@ -15,6 +15,7 @@
 
 #include <linux/random.h>
 #include <linux/version.h>
+#include <asm/pointer_auth.h>
 
 extern unsigned long __stack_chk_guard;
 
@@ -26,6 +27,7 @@ extern unsigned long __stack_chk_guard;
  */
 static __always_inline void boot_init_stack_canary(void)
 {
+#if defined(CONFIG_STACKPROTECTOR)
        unsigned long canary;
 
        /* Try to get a semi random initial value. */
@@ -36,6 +38,9 @@ static __always_inline void boot_init_stack_canary(void)
        current->stack_canary = canary;
        if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
                __stack_chk_guard = current->stack_canary;
+#endif
+       ptrauth_thread_init_kernel(current);
+       ptrauth_thread_switch_kernel(current);
 }
 
 #endif /* _ASM_STACKPROTECTOR_H */
index a5bdce8..9981a0a 100644
@@ -40,6 +40,10 @@ int main(void)
 #endif
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,   offsetof(struct task_struct, thread.cpu_context));
+#ifdef CONFIG_ARM64_PTR_AUTH
+  DEFINE(THREAD_KEYS_USER,     offsetof(struct task_struct, thread.keys_user));
+  DEFINE(THREAD_KEYS_KERNEL,   offsetof(struct task_struct, thread.keys_kernel));
+#endif
   BLANK();
   DEFINE(S_X0,                 offsetof(struct pt_regs, regs[0]));
   DEFINE(S_X2,                 offsetof(struct pt_regs, regs[2]));
@@ -88,6 +92,9 @@ int main(void)
   BLANK();
   DEFINE(CPU_BOOT_STACK,       offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,                offsetof(struct secondary_data, task));
+#ifdef CONFIG_ARM64_PTR_AUTH
+  DEFINE(CPU_BOOT_PTRAUTH_KEY, offsetof(struct secondary_data, ptrauth_key));
+#endif
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,         offsetof(struct kvm_vcpu, arch.ctxt));
@@ -127,6 +134,15 @@ int main(void)
 #ifdef CONFIG_ARM_SDE_INTERFACE
   DEFINE(SDEI_EVENT_INTREGS,   offsetof(struct sdei_registered_event, interrupted_regs));
   DEFINE(SDEI_EVENT_PRIORITY,  offsetof(struct sdei_registered_event, priority));
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+  DEFINE(PTRAUTH_USER_KEY_APIA,                offsetof(struct ptrauth_keys_user, apia));
+  DEFINE(PTRAUTH_USER_KEY_APIB,                offsetof(struct ptrauth_keys_user, apib));
+  DEFINE(PTRAUTH_USER_KEY_APDA,                offsetof(struct ptrauth_keys_user, apda));
+  DEFINE(PTRAUTH_USER_KEY_APDB,                offsetof(struct ptrauth_keys_user, apdb));
+  DEFINE(PTRAUTH_USER_KEY_APGA,                offsetof(struct ptrauth_keys_user, apga));
+  DEFINE(PTRAUTH_KERNEL_KEY_APIA,      offsetof(struct ptrauth_keys_kernel, apia));
+  BLANK();
 #endif
   return 0;
 }
index 38ebad8..9fac745 100644
@@ -116,6 +116,8 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 
 static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
 
+static bool __system_matches_cap(unsigned int n);
+
 /*
  * NOTE: Any changes to the visibility of features should be kept in
  * sync with the documentation of the CPU feature register ABI.
@@ -1368,10 +1370,18 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 #endif /* CONFIG_ARM64_RAS_EXTN */
 
 #ifdef CONFIG_ARM64_PTR_AUTH
-static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
+                            int __unused)
+{
+       return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
+              __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
+}
+
+static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
+                            int __unused)
 {
-       sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
-                                      SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+       return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
+              __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
 }
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
@@ -1399,6 +1409,25 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
 }
 #endif
 
+/* Internal helper functions to match cpu capability type */
+static bool
+cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
+{
+       return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
+{
+       return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
+{
+       return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -1662,24 +1691,27 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "Address authentication (architected algorithm)",
                .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
-               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
                .field_pos = ID_AA64ISAR1_APA_SHIFT,
                .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
                .matches = has_cpuid_feature,
-               .cpu_enable = cpu_enable_address_auth,
        },
        {
                .desc = "Address authentication (IMP DEF algorithm)",
                .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
-               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
                .field_pos = ID_AA64ISAR1_API_SHIFT,
                .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
                .matches = has_cpuid_feature,
-               .cpu_enable = cpu_enable_address_auth,
+       },
+       {
+               .capability = ARM64_HAS_ADDRESS_AUTH,
+               .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+               .matches = has_address_auth,
        },
        {
                .desc = "Generic authentication (architected algorithm)",
@@ -1701,6 +1733,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
                .matches = has_cpuid_feature,
        },
+       {
+               .capability = ARM64_HAS_GENERIC_AUTH,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_generic_auth,
+       },
 #endif /* CONFIG_ARM64_PTR_AUTH */
 #ifdef CONFIG_ARM64_PSEUDO_NMI
        {
@@ -2050,10 +2087,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
  * Run through the list of capabilities to check for conflicts.
  * If the system has already detected a capability, take necessary
  * action on this CPU.
- *
- * Returns "false" on conflicts.
  */
-static bool verify_local_cpu_caps(u16 scope_mask)
+static void verify_local_cpu_caps(u16 scope_mask)
 {
        int i;
        bool cpu_has_cap, system_has_cap;
@@ -2098,10 +2133,12 @@ static bool verify_local_cpu_caps(u16 scope_mask)
                pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
                        smp_processor_id(), caps->capability,
                        caps->desc, system_has_cap, cpu_has_cap);
-               return false;
-       }
 
-       return true;
+               if (cpucap_panic_on_conflict(caps))
+                       cpu_panic_kernel();
+               else
+                       cpu_die_early();
+       }
 }
 
 /*
@@ -2111,12 +2148,8 @@ static bool verify_local_cpu_caps(u16 scope_mask)
 static void check_early_cpu_features(void)
 {
        verify_cpu_asid_bits();
-       /*
-        * Early features are used by the kernel already. If there
-        * is a conflict, we cannot proceed further.
-        */
-       if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
-               cpu_panic_kernel();
+
+       verify_local_cpu_caps(SCOPE_BOOT_CPU);
 }
 
 static void
@@ -2164,8 +2197,7 @@ static void verify_local_cpu_capabilities(void)
         * check_early_cpu_features(), as they need to be verified
         * on all secondary CPUs.
         */
-       if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
-               cpu_die_early();
+       verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
 
        verify_local_elf_hwcaps(arm64_elf_hwcaps);
 
@@ -2216,6 +2248,23 @@ bool this_cpu_has_cap(unsigned int n)
        return false;
 }
 
+/*
+ * This helper function is used in a narrow window when:
+ * - the system-wide safe registers have been set up for all SMP CPUs, and
+ * - the SYSTEM_FEATURE cpu_hwcaps may not yet have been set.
+ * In all other cases cpus_have_{const_}cap() should be used.
+ */
+static bool __system_matches_cap(unsigned int n)
+{
+       if (n < ARM64_NCAPS) {
+               const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+               if (cap)
+                       return cap->matches(cap, SCOPE_SYSTEM);
+       }
+       return false;
+}
+
 void cpu_set_feature(unsigned int num)
 {
        WARN_ON(num >= MAX_CPU_FEATURES);
@@ -2288,7 +2337,7 @@ void __init setup_cpu_features(void)
 static bool __maybe_unused
 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 {
-       return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
+       return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
 }
 
 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
index e5d4e30..ddcde09 100644
@@ -14,6 +14,7 @@
 #include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
 #include <asm/cpufeature.h>
 #include <asm/errno.h>
 #include <asm/esr.h>
@@ -177,6 +178,7 @@ alternative_cb_end
 
        apply_ssbd 1, x22, x23
 
+       ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
        .else
        add     x21, sp, #S_FRAME_SIZE
        get_current_task tsk
@@ -341,6 +343,9 @@ alternative_else_nop_endif
        msr     cntkctl_el1, x1
 4:
 #endif
+       /* No kernel C function calls after this as user keys are set. */
+       ptrauth_keys_install_user tsk, x0, x1, x2
+
        apply_ssbd 0, x0, x1
        .endif
 
@@ -895,6 +900,7 @@ SYM_FUNC_START(cpu_switch_to)
        ldr     lr, [x8]
        mov     sp, x9
        msr     sp_el0, x1
+       ptrauth_keys_install_kernel x1, 1, x8, x9, x10
        ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
index 2f7ea6d..57a9103 100644
@@ -118,6 +118,7 @@ SYM_CODE_START(stext)
         * On return, the CPU will be ready for the MMU to be turned on and
         * the TCR will have been set.
         */
+       mov     x0, #ARM64_CPU_BOOT_PRIMARY
        bl      __cpu_setup                     // initialise processor
        b       __primary_switch
 SYM_CODE_END(stext)
@@ -716,6 +717,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
         * Common entry point for secondary CPUs.
         */
        bl      __cpu_secondary_check52bitva
+       mov     x0, #ARM64_CPU_BOOT_SECONDARY
        bl      __cpu_setup                     // initialise processor
        adrp    x1, swapper_pg_dir
        bl      __enable_mmu
index c507b58..1e77736 100644
@@ -9,7 +9,7 @@
 
 int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
 {
-       struct ptrauth_keys *keys = &tsk->thread.keys_user;
+       struct ptrauth_keys_user *keys = &tsk->thread.keys_user;
        unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
                                      PR_PAC_APDAKEY | PR_PAC_APDBKEY;
        unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY;
@@ -18,8 +18,7 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
                return -EINVAL;
 
        if (!arg) {
-               ptrauth_keys_init(keys);
-               ptrauth_keys_switch(keys);
+               ptrauth_keys_init_user(keys);
                return 0;
        }
 
@@ -41,7 +40,5 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
        if (arg & PR_PAC_APGAKEY)
                get_random_bytes(&keys->apga, sizeof(keys->apga));
 
-       ptrauth_keys_switch(keys);
-
        return 0;
 }
index 0062605..cacae29 100644
@@ -262,7 +262,7 @@ void __show_regs(struct pt_regs *regs)
 
        if (!user_mode(regs)) {
                printk("pc : %pS\n", (void *)regs->pc);
-               printk("lr : %pS\n", (void *)lr);
+               printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
        } else {
                printk("pc : %016llx\n", regs->pc);
                printk("lr : %016llx\n", lr);
@@ -376,6 +376,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
         */
        fpsimd_flush_task_state(p);
 
+       ptrauth_thread_init_kernel(p);
+
        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;
@@ -512,7 +514,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        contextidr_thread_switch(next);
        entry_task_switch(next);
        uao_thread_switch(next);
-       ptrauth_thread_switch(next);
        ssbs_thread_switch(next);
 
        /*
index cd6e5fa..b3d3005 100644
@@ -999,7 +999,7 @@ static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
 }
 
 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
-                                    const struct ptrauth_keys *keys)
+                                    const struct ptrauth_keys_user *keys)
 {
        ukeys->apiakey = pac_key_to_user(&keys->apia);
        ukeys->apibkey = pac_key_to_user(&keys->apib);
@@ -1007,7 +1007,7 @@ static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
        ukeys->apdbkey = pac_key_to_user(&keys->apdb);
 }
 
-static void pac_address_keys_from_user(struct ptrauth_keys *keys,
+static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
                                       const struct user_pac_address_keys *ukeys)
 {
        keys->apia = pac_key_from_user(ukeys->apiakey);
@@ -1021,7 +1021,7 @@ static int pac_address_keys_get(struct task_struct *target,
                                unsigned int pos, unsigned int count,
                                void *kbuf, void __user *ubuf)
 {
-       struct ptrauth_keys *keys = &target->thread.keys_user;
+       struct ptrauth_keys_user *keys = &target->thread.keys_user;
        struct user_pac_address_keys user_keys;
 
        if (!system_supports_address_auth())
@@ -1038,7 +1038,7 @@ static int pac_address_keys_set(struct task_struct *target,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
 {
-       struct ptrauth_keys *keys = &target->thread.keys_user;
+       struct ptrauth_keys_user *keys = &target->thread.keys_user;
        struct user_pac_address_keys user_keys;
        int ret;
 
@@ -1056,12 +1056,12 @@ static int pac_address_keys_set(struct task_struct *target,
 }
 
 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
-                                    const struct ptrauth_keys *keys)
+                                    const struct ptrauth_keys_user *keys)
 {
        ukeys->apgakey = pac_key_to_user(&keys->apga);
 }
 
-static void pac_generic_keys_from_user(struct ptrauth_keys *keys,
+static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
                                       const struct user_pac_generic_keys *ukeys)
 {
        keys->apga = pac_key_from_user(ukeys->apgakey);
@@ -1072,7 +1072,7 @@ static int pac_generic_keys_get(struct task_struct *target,
                                unsigned int pos, unsigned int count,
                                void *kbuf, void __user *ubuf)
 {
-       struct ptrauth_keys *keys = &target->thread.keys_user;
+       struct ptrauth_keys_user *keys = &target->thread.keys_user;
        struct user_pac_generic_keys user_keys;
 
        if (!system_supports_generic_auth())
@@ -1089,7 +1089,7 @@ static int pac_generic_keys_set(struct task_struct *target,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
 {
-       struct ptrauth_keys *keys = &target->thread.keys_user;
+       struct ptrauth_keys_user *keys = &target->thread.keys_user;
        struct user_pac_generic_keys user_keys;
        int ret;
 
index f5b04dd..7b2f2e6 100644
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
+#include <asm/smp.h>
 
        .text
 /*
@@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter)
        .pushsection ".idmap.text", "awx"
 ENTRY(cpu_resume)
        bl      el2_setup               // if in EL2 drop to EL1 cleanly
+       mov     x0, #ARM64_CPU_RUNTIME
        bl      __cpu_setup
        /* enable the MMU early - so we can access sleep_save_stash by va */
        adrp    x1, swapper_pg_dir
index 0348067..fd4b2ec 100644
@@ -114,6 +114,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
         */
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
+#if defined(CONFIG_ARM64_PTR_AUTH)
+       secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo;
+       secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi;
+#endif
        update_cpu_boot_status(CPU_MMU_OFF);
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));
 
@@ -136,6 +140,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
        secondary_data.stack = NULL;
+#if defined(CONFIG_ARM64_PTR_AUTH)
+       secondary_data.ptrauth_key.apia.lo = 0;
+       secondary_data.ptrauth_key.apia.hi = 0;
+#endif
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (status == CPU_MMU_OFF)
index a336cb1..139679c 100644
@@ -14,6 +14,7 @@
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
+#include <asm/pointer_auth.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
@@ -86,7 +87,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk->ret_stack &&
-                       (frame->pc == (unsigned long)return_to_handler)) {
+               (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
                struct ftrace_ret_stack *ret_stack;
                /*
                 * This is a case where function graph tracer has
@@ -101,6 +102,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
        }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+       frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
        /*
         * Frames created upon entry from EL0 have NULL FP and PC values, so
         * don't bother reporting these. Frames created by __noreturn functions
index 6bd2280..197a9ba 100644
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
 #include <asm/hwcap.h>
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
+#include <asm/smp.h>
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS   TCR_TG0_64K | TCR_TG1_64K
@@ -137,6 +139,7 @@ alternative_if ARM64_HAS_RAS_EXTN
        msr_s   SYS_DISR_EL1, xzr
 alternative_else_nop_endif
 
+       ptrauth_keys_install_kernel x14, 0, x1, x2, x3
        isb
        ret
 SYM_FUNC_END(cpu_do_resume)
@@ -381,32 +384,32 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
 /*
  *     __cpu_setup
  *
- *     Initialise the processor for turning the MMU on.  Return in x0 the
- *     value of the SCTLR_EL1 register.
+ *     Initialise the processor for turning the MMU on.
+ *
+ * Input:
+ *     x0 = ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME.
+ * Output:
+ *     Return in x0 the value of the SCTLR_EL1 register.
  */
        .pushsection ".idmap.text", "awx"
 SYM_FUNC_START(__cpu_setup)
        tlbi    vmalle1                         // Invalidate local TLB
        dsb     nsh
 
-       mov     x0, #3 << 20
-       msr     cpacr_el1, x0                   // Enable FP/ASIMD
-       mov     x0, #1 << 12                    // Reset mdscr_el1 and disable
-       msr     mdscr_el1, x0                   // access to the DCC from EL0
+       mov     x1, #3 << 20
+       msr     cpacr_el1, x1                   // Enable FP/ASIMD
+       mov     x1, #1 << 12                    // Reset mdscr_el1 and disable
+       msr     mdscr_el1, x1                   // access to the DCC from EL0
        isb                                     // Unmask debug exceptions now,
        enable_dbg                              // since this is per-cpu
-       reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
-       reset_amuserenr_el0 x0                  // Disable AMU access from EL0
+       reset_pmuserenr_el0 x1                  // Disable PMU access from EL0
+       reset_amuserenr_el0 x1                  // Disable AMU access from EL0
 
        /*
         * Memory region attributes
         */
        mov_q   x5, MAIR_EL1_SET
        msr     mair_el1, x5
-       /*
-        * Prepare SCTLR
-        */
-       mov_q   x0, SCTLR_EL1_SET
        /*
         * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
         * both user and kernel.
@@ -443,5 +446,51 @@ SYM_FUNC_START(__cpu_setup)
 1:
 #endif /* CONFIG_ARM64_HW_AFDBM */
        msr     tcr_el1, x10
+       mov     x1, x0
+       /*
+        * Prepare SCTLR
+        */
+       mov_q   x0, SCTLR_EL1_SET
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+       /* No ptrauth setup for runtime CPUs */
+       cmp     x1, #ARM64_CPU_RUNTIME
+       b.eq    3f
+
+       /* Check if the CPU supports ptrauth */
+       mrs     x2, id_aa64isar1_el1
+       ubfx    x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8
+       cbz     x2, 3f
+
+       /*
+        * The primary cpu's keys are reset here and will be
+        * re-initialised with proper values later.
+        */
+       msr_s   SYS_APIAKEYLO_EL1, xzr
+       msr_s   SYS_APIAKEYHI_EL1, xzr
+
+       /* Just enable ptrauth for primary cpu */
+       cmp     x1, #ARM64_CPU_BOOT_PRIMARY
+       b.eq    2f
+
+       /* if !system_supports_address_auth() then skip enable */
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+       b       3f
+alternative_else_nop_endif
+
+       /* Install ptrauth key for secondary cpus */
+       adr_l   x2, secondary_data
+       ldr     x3, [x2, #CPU_BOOT_TASK]        // get secondary_data.task
+       cbz     x3, 2f                          // check for slow booting cpus
+       ldp     x3, x4, [x2, #CPU_BOOT_PTRAUTH_KEY]
+       msr_s   SYS_APIAKEYLO_EL1, x3
+       msr_s   SYS_APIAKEYHI_EL1, x4
+
+2:     /* Enable ptrauth instructions */
+       ldr     x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+                    SCTLR_ELx_ENDA | SCTLR_ELx_ENDB
+       orr     x0, x0, x2
+3:
+#endif
        ret                                     // return to head.S
 SYM_FUNC_END(__cpu_setup)
index de87693..cc92bc3 100644
@@ -378,3 +378,39 @@ void lkdtm_DOUBLE_FAULT(void)
        pr_err("XFAIL: this test is ia32-only\n");
 #endif
 }
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+static noinline void change_pac_parameters(void)
+{
+       /* Reset the keys of current task */
+       ptrauth_thread_init_kernel(current);
+       ptrauth_thread_switch_kernel(current);
+}
+
+#define CORRUPT_PAC_ITERATE    10
+noinline void lkdtm_CORRUPT_PAC(void)
+{
+       int i;
+
+       if (!system_supports_address_auth()) {
+               pr_err("FAIL: arm64 pointer authentication feature not present\n");
+               return;
+       }
+
+       pr_info("Change the PAC parameters to force function return failure\n");
+       /*
+        * The PAC is a hash value computed from the keys, the return address
+        * and the stack pointer. As the PAC has only a few bits, collisions
+        * are possible, so iterate a few times to reduce their probability.
+        */
+       for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
+               change_pac_parameters();
+
+       pr_err("FAIL: %s test failed. Kernel may be unstable from here\n", __func__);
+}
+#else /* !CONFIG_ARM64_PTR_AUTH */
+noinline void lkdtm_CORRUPT_PAC(void)
+{
+       pr_err("FAIL: arm64 pointer authentication config disabled\n");
+}
+#endif
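
To put a number on the collision comment above, a back-of-the-envelope sketch
(the 7-bit PAC width is an illustrative assumption; the real width depends on
the VA size and TBI configuration):

    #include <math.h>
    #include <stdio.h>

    /* Probability that a return address signed with the old keys still
     * authenticates under the new keys on every one of 'iterations'
     * attempts. */
    static double pac_escape_probability(int pac_bits, int iterations)
    {
            return pow(1.0 / (double)(1ULL << pac_bits), iterations);
    }

    int main(void)
    {
            /* 7 PAC bits, CORRUPT_PAC_ITERATE = 10: 2^-70 ~= 8.5e-22 */
            printf("%.3g\n", pac_escape_probability(7, 10));
            return 0;
    }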
index ee0d6e7..5ce4ac8 100644
@@ -116,6 +116,7 @@ static const struct crashtype crashtypes[] = {
        CRASHTYPE(STACK_GUARD_PAGE_LEADING),
        CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
        CRASHTYPE(UNSET_SMEP),
+       CRASHTYPE(CORRUPT_PAC),
        CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
        CRASHTYPE(OVERWRITE_ALLOCATION),
        CRASHTYPE(WRITE_AFTER_FREE),
index c56d23e..8d13d01 100644
@@ -31,6 +31,7 @@ void lkdtm_UNSET_SMEP(void);
 #ifdef CONFIG_X86_32
 void lkdtm_DOUBLE_FAULT(void);
 #endif
+void lkdtm_CORRUPT_PAC(void);
 
 /* lkdtm_heap.c */
 void __init lkdtm_heap_init(void);
index 6b792d0..4c678c4 100644
@@ -6,7 +6,7 @@
 #include <linux/sched.h>
 #include <linux/random.h>
 
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH)
 # include <asm/stackprotector.h>
 #else
 static inline void boot_init_stack_canary(void)
index 85334dc..a1c1925 100644
@@ -31,6 +31,12 @@ cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -S -x c /dev/null -o /de
 # Return y if the linker supports <flag>, n otherwise
 ld-option = $(success,$(LD) -v $(1))
 
+# $(as-option,<flag>)
+# Return y if the assembler supports <flag>, n otherwise. /dev/zero is used as
+# output instead of /dev/null because some assemblers complain when the input
+# and output files are the same; both devices have the same write behaviour.
+as-option = $(success, $(CC) $(CLANG_FLAGS) $(1) -c -x assembler /dev/null -o /dev/zero)
+
 # $(as-instr,<instr>)
 # Return y if the assembler supports <instr>, n otherwise
 as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)