Merge tag 'docs-5.15' of git://git.lwn.net/linux
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 863d44f..bc6d5a9 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
-/*
- * Context tracking and irqflag tracing need to instrument transitions between
- * user and kernel mode.
- */
-       .macro user_enter_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
-       bl      exit_to_user_mode
-#endif
-       .endm
-
        .macro  clear_gp_regs
        .irp    n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
        mov     x\n, xzr
@@ -133,42 +123,46 @@ alternative_cb_end
        .endm
 
        /* Check for MTE asynchronous tag check faults */
-       .macro check_mte_async_tcf, tmp, ti_flags
+       .macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
 #ifdef CONFIG_ARM64_MTE
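        /* Let the assembler accept the LSE stset used below; MTE CPUs always implement LSE. */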
        .arch_extension lse
 alternative_if_not ARM64_MTE
        b       1f
 alternative_else_nop_endif
+       /*
+        * Asynchronous tag check faults are only possible in ASYNC (2) or
+        * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
+        * set, so skip the check if it is unset.
+        */
+       tbz     \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
        mrs_s   \tmp, SYS_TFSRE0_EL1
        tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
        /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
        mov     \tmp, #_TIF_MTE_ASYNC_FAULT
        add     \ti_flags, tsk, #TSK_TI_FLAGS
        stset   \tmp, [\ti_flags]
-       msr_s   SYS_TFSRE0_EL1, xzr
 1:
 #endif
        .endm
 
        /* Clear the MTE asynchronous tag check faults */
-       .macro clear_mte_async_tcf
+       .macro clear_mte_async_tcf thread_sctlr
 #ifdef CONFIG_ARM64_MTE
 alternative_if ARM64_MTE
+       /* See comment in check_mte_async_tcf above. */
+       tbz     \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
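        /* Ensure all pending tag check faults have been recorded before discarding them. */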
        dsb     ish
        msr_s   SYS_TFSRE0_EL1, xzr
+1:
 alternative_else_nop_endif
 #endif
        .endm
 
-       .macro mte_set_gcr, tmp, tmp2
+       .macro mte_set_gcr, mte_ctrl, tmp
 #ifdef CONFIG_ARM64_MTE
-       /*
-        * Calculate and set the exclude mask preserving
-        * the RRND (bit[16]) setting.
-        */
-       mrs_s   \tmp2, SYS_GCR_EL1
-       bfi     \tmp2, \tmp, #0, #16
-       msr_s   SYS_GCR_EL1, \tmp2
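+       /*
+        * Set the exclude mask from the thread's mte_ctrl value, with the
+        * RRND (bit[16]) setting always enabled.
+        */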
+       ubfx    \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
+       orr     \tmp, \tmp, #SYS_GCR_EL1_RRND
+       msr_s   SYS_GCR_EL1, \tmp
 #endif
        .endm
 
@@ -177,10 +171,8 @@ alternative_else_nop_endif
 alternative_if_not ARM64_MTE
        b       1f
 alternative_else_nop_endif
-       ldr_l   \tmp, gcr_kernel_excl
-
-       mte_set_gcr \tmp, \tmp2
-       isb
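+       /* The kernel GCR_EL1 setting is a build-time constant. */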
+       mov     \tmp, KERNEL_GCR_EL1
+       msr_s   SYS_GCR_EL1, \tmp
 1:
 #endif
        .endm
@@ -190,7 +182,7 @@ alternative_else_nop_endif
 alternative_if_not ARM64_MTE
        b       1f
 alternative_else_nop_endif
-       ldr     \tmp, [\tsk, #THREAD_GCR_EL1_USER]
+       ldr     \tmp, [\tsk, #THREAD_MTE_CTRL]
 
        mte_set_gcr \tmp, \tmp2
 1:
@@ -231,8 +223,8 @@ alternative_else_nop_endif
        disable_step_tsk x19, x20
 
        /* Check for asynchronous tag check faults in user space */
-       check_mte_async_tcf x22, x23
-       apply_ssbd 1, x22, x23
+       ldr     x0, [tsk, THREAD_SCTLR_USER]
+       check_mte_async_tcf x22, x23, x0
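+       /* x0 (the saved user SCTLR value) is reused by the ptrauth code below. */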
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -245,7 +237,6 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
         * was disabled on kernel exit then we would have left the kernel IA
         * installed so there is no need to install it again.
         */
-       ldr     x0, [tsk, THREAD_SCTLR_USER]
        tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f
        __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
        b       2f
@@ -254,12 +245,26 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
        orr     x0, x0, SCTLR_ELx_ENIA
        msr     sctlr_el1, x0
 2:
-       isb
 alternative_else_nop_endif
 #endif
 
+       apply_ssbd 1, x22, x23
+
        mte_set_kernel_gcr x22, x23
 
+       /*
+        * Any non-self-synchronizing system register updates required for
+        * kernel entry should be placed before this point.
+        */
+alternative_if ARM64_MTE
+       isb
+       b       1f
+alternative_else_nop_endif
+alternative_if ARM64_HAS_ADDRESS_AUTH
+       isb
+alternative_else_nop_endif
+1:
+
        scs_load tsk
        .else
        add     x21, sp, #PT_REGS_SIZE
@@ -362,6 +367,10 @@ alternative_else_nop_endif
 3:
        scs_save tsk
 
+       /* Ignore asynchronous tag check faults in the uaccess routines */
+       ldr     x0, [tsk, THREAD_SCTLR_USER]
+       clear_mte_async_tcf x0
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
        /*
@@ -371,7 +380,6 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
         *
         * No kernel C function calls after this.
         */
-       ldr     x0, [tsk, THREAD_SCTLR_USER]
        tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f
        __ptrauth_keys_install_user tsk, x0, x1, x2
        b       2f
@@ -474,18 +482,6 @@ SYM_CODE_END(__swpan_exit_el0)
 /* GPRs used by entry code */
 tsk    .req    x28             // current thread_info
 
-/*
- * Interrupt handling.
- */
-       .macro  gic_prio_kentry_setup, tmp:req
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-       alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-       mov     \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
-       msr_s   SYS_ICC_PMR_EL1, \tmp
-       alternative_else_nop_endif
-#endif
-       .endm
-
        .text
 
 /*
@@ -517,12 +513,13 @@ SYM_CODE_START(vectors)
 SYM_CODE_END(vectors)
 
 #ifdef CONFIG_VMAP_STACK
+SYM_CODE_START_LOCAL(__bad_stack)
        /*
         * We detected an overflow in kernel_ventry, which switched to the
         * overflow stack. Stash the exception regs, and head to our overflow
         * handler.
         */
-__bad_stack:
+
        /* Restore the original x0 value */
        mrs     x0, tpidrro_el0
 
@@ -542,6 +539,7 @@ __bad_stack:
        /* Time to die */
        bl      handle_bad_stack
        ASM_BUG()
+SYM_CODE_END(__bad_stack)
 #endif /* CONFIG_VMAP_STACK */
 
 
@@ -585,37 +583,13 @@ SYM_CODE_START_LOCAL(ret_to_kernel)
        kernel_exit 1
 SYM_CODE_END(ret_to_kernel)
 
-/*
- * "slow" syscall return path.
- */
 SYM_CODE_START_LOCAL(ret_to_user)
-       disable_daif
-       gic_prio_kentry_setup tmp=x3
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_off
-#endif
-       ldr     x19, [tsk, #TSK_TI_FLAGS]
-       and     x2, x19, #_TIF_WORK_MASK
-       cbnz    x2, work_pending
-finish_ret_to_user:
-       user_enter_irqoff
-       /* Ignore asynchronous tag check faults in the uaccess routines */
-       clear_mte_async_tcf
+       ldr     x19, [tsk, #TSK_TI_FLAGS]       // re-check for single-step
        enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
        bl      stackleak_erase
 #endif
        kernel_exit 0
-
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
-work_pending:
-       mov     x0, sp                          // 'regs'
-       mov     x1, x19
-       bl      do_notify_resume
-       ldr     x19, [tsk, #TSK_TI_FLAGS]       // re-check for single-step
-       b       finish_ret_to_user
 SYM_CODE_END(ret_to_user)
 
        .popsection                             // .entry.text
@@ -781,6 +755,8 @@ SYM_CODE_START(ret_from_fork)
        mov     x0, x20
        blr     x19
 1:     get_current_task tsk
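+       /* Exit-to-user work (signals, tracing) is handled in C before the final return. */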
+       mov     x0, sp
+       bl      asm_exit_to_user_mode
        b       ret_to_user
 SYM_CODE_END(ret_from_fork)
 NOKPROBE(ret_from_fork)