/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking and irqflag tracing need to instrument transitions between
 * user and kernel mode.
 */
	.macro user_exit_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	enter_from_user_mode
#endif
	.endm

	.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	exit_to_user_mode
#endif
	.endm
	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm
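/*
 * Why clear_gp_regs exists (explanatory note, not from the original file):
 * it is invoked by kernel_entry for exceptions from EL0, after the user GPRs
 * have been saved. The .irp loop expands to thirty "mov x<n>, xzr"
 * instructions, one per register x0-x29, so that stale user-controlled
 * register values cannot feed kernel execution, speculatively or otherwise.
 */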
	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
alternative_else_nop_endif
	.endif
#endif

	sub	sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0'' = (sp + x0) - x0 = sp
	b	el\()\el\()_\label
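/*
 * Sketch of the check above: "add sp, sp, x0" leaves SP temporarily corrupt
 * as sp+x0, and "sub x0, sp, x0" recovers the original SP into x0, so tbnz
 * can test bit THREAD_SHIFT without clobbering any GPR. Valid task/IRQ
 * stack pointers always have that bit clear, so finding it set means the
 * PT_REGS_SIZE decrement just ran off the bottom of the stack and we take
 * the overflow path at 0: below; otherwise the two trailing subs undo the
 * dance and both SP and x0 hold their original values again.
 */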
0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
	.endm
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm
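/*
 * Note on tramp_alias: every symbol in .entry.tramp.text sits at a fixed
 * offset from the start of the section, so adding that offset to
 * TRAMP_VALIAS (the fixmap virtual alias of the trampoline page, which is
 * mapped in both the user and kernel page tables) yields the address of
 * the same code through the always-mapped alias.
 */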
	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	spectre_v4_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm
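/*
 * The firmware call above follows the SMCCC ARCH_WORKAROUND_2 convention:
 * w0 carries the function ID and w1 the requested mitigation state
 * (\state), so "apply_ssbd 1" enables Spectre-v4 mitigation on kernel
 * entry and "apply_ssbd 0" disables it again on return to userspace.
 */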
	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
	msr_s	SYS_TFSRE0_EL1, xzr
1:
#endif
	.endm
	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
alternative_else_nop_endif
#endif
	.endm
	.macro mte_set_gcr, tmp, tmp2
#ifdef CONFIG_ARM64_MTE
	/*
	 * Calculate and set the exclude mask preserving
	 * the RRND (bit[16]) setting.
	 */
	mrs_s	\tmp2, SYS_GCR_EL1
	bfi	\tmp2, \tmp, #0, #16
	msr_s	SYS_GCR_EL1, \tmp2
#endif
	.endm
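/*
 * GCR_EL1 layout, for reference: bits [15:0] hold the Exclude mask and
 * bit 16 is RRND. The bfi above inserts \tmp into bits [15:0] only, which
 * is exactly why RRND (and any higher bits) survive the update.
 */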
	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	ldr_l	\tmp, gcr_kernel_excl

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm
	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_ARM64_MTE
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	ldr	\tmp, [\tsk, #THREAD_GCR_EL1_USER]

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk
	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	check_mte_async_tcf x22, x23
	apply_ssbd 1, x22, x23
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
	isb
alternative_else_nop_endif
#endif
	mte_set_kernel_gcr x22, x23

	scs_load tsk, x20
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	/*
	 * For exceptions from EL0, terminate the callchain here.
	 * For exceptions from EL1, create a synthetic frame record so the
	 * interrupted code shows up in the backtrace.
	 */
	.if \el == 0
	mov	x29, xzr
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME
	.endif
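/*
 * A frame record is a {fp, lr} pair. Storing the old x29 alongside the
 * exception ELR at S_STACKFRAME and pointing x29 at it lets the unwinder
 * walk from the handler straight to the interrupted EL1 code as if it had
 * been called normally; the xzr terminator stops the walk for EL0 entries.
 */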
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif
	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif
	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
alternative_else_nop_endif
	/* Re-enable tag checking (TCO set on exception entry) */
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	SET_PSTATE_TCO(0)
alternative_else_nop_endif
#endif
	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif
	.endif

	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk, x0
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif
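/*
 * Rationale for the "no kernel C function calls" rule above: once the
 * user's IA key is installed (or ENIA is cleared), any paciasp/autiasp
 * pair executed in the kernel would sign and authenticate return
 * addresses with the wrong key and fault on return.
 */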
	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif
	sb
	.endm
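/*
 * The trailing "sb" is a straight-line speculation barrier: it stops the
 * CPU from speculatively executing whatever instructions happen to follow
 * the eret/br, closing a potential Spectre-style side channel.
 */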
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)
SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	x24, scs_sp		// preserve the original shadow stack
#endif

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

#ifdef CONFIG_SHADOW_CALL_STACK
	/* also switch to the irq shadow stack */
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
#endif

9998:
	.endm
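/*
 * Sketch of the range check above: "eor x25, x25, x19" leaves only the
 * bits in which the task stack base and the current SP differ, and
 * masking with ~(THREAD_SIZE - 1) discards the offset-within-stack bits.
 * A zero result therefore means SP points somewhere inside the task
 * stack, and only then do we move onto the per-CPU IRQ stack.
 */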
	/*
	 * The callee-saved regs (x19-x29) should be preserved between
	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
	 * uses x20-x23 to store data for later use.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	scs_sp, x24
#endif
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info
/*
 * Interrupt handling.
 */
	.macro	irq_handler, handler:req
	ldr_l	x1, \handler
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm
	.macro	gic_prio_irq_setup, pmr:req, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm
	.macro el1_interrupt_handler, handler:req
	gic_prio_irq_setup pmr=x20, tmp=x1
	enable_da_f

	mov	x0, sp
	bl	enter_el1_irq_or_nmi

	irq_handler	\handler

#ifdef CONFIG_PREEMPTION
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	/*
	 * DA were cleared at start of handling, and IF are cleared by
	 * the GIC irqchip driver using gic_arch_enable_irqs() for
	 * normal IRQs. If anything is set, it means we come back from
	 * an NMI instead of a normal IRQ, so skip preemption
	 */
	mrs	x0, daif
	orr	x24, x24, x0
alternative_else_nop_endif
	cbnz	x24, 1f				// preempt count != 0 || NMI return path
	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
1:
#endif

	mov	x0, sp
	bl	exit_el1_irq_or_nmi
	.endm
	.macro el0_interrupt_handler, handler:req
	gic_prio_irq_setup pmr=x20, tmp=x0
	user_exit_irqoff
	enable_da_f

	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
	irq_handler	\handler
	.endm
/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq				// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq				// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_compat, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
SYM_CODE_END(vectors)
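/*
 * Layout note: the table is 2KiB aligned (.align 11) and each kernel_ventry
 * expands into a 128-byte slot (.align 7 inside the macro), giving the four
 * architecturally-defined groups of four vectors: current EL with SP_EL0,
 * current EL with SP_ELx, lower EL AArch64, and lower EL AArch32.
 */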
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm
SYM_CODE_START_LOCAL(el0_sync_invalid)
	inv_entry 0, BAD_SYNC
SYM_CODE_END(el0_sync_invalid)

SYM_CODE_START_LOCAL(el0_irq_invalid)
	inv_entry 0, BAD_IRQ
SYM_CODE_END(el0_irq_invalid)

SYM_CODE_START_LOCAL(el0_fiq_invalid)
	inv_entry 0, BAD_FIQ
SYM_CODE_END(el0_fiq_invalid)

SYM_CODE_START_LOCAL(el0_error_invalid)
	inv_entry 0, BAD_ERROR
SYM_CODE_END(el0_error_invalid)

SYM_CODE_START_LOCAL(el1_sync_invalid)
	inv_entry 1, BAD_SYNC
SYM_CODE_END(el1_sync_invalid)

SYM_CODE_START_LOCAL(el1_irq_invalid)
	inv_entry 1, BAD_IRQ
SYM_CODE_END(el1_irq_invalid)

SYM_CODE_START_LOCAL(el1_fiq_invalid)
	inv_entry 1, BAD_FIQ
SYM_CODE_END(el1_fiq_invalid)

SYM_CODE_START_LOCAL(el1_error_invalid)
	inv_entry 1, BAD_ERROR
SYM_CODE_END(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
	kernel_entry 1
	mov	x0, sp
	bl	el1_sync_handler
	kernel_exit 1
SYM_CODE_END(el1_sync)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
	kernel_entry 1
	el1_interrupt_handler handle_arch_irq
	kernel_exit 1
SYM_CODE_END(el1_irq)

SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
	kernel_entry 1
	el1_interrupt_handler handle_arch_fiq
	kernel_exit 1
SYM_CODE_END(el1_fiq)
/*
 * EL0 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
	kernel_entry 0
	mov	x0, sp
	bl	el0_sync_handler
	b	ret_to_user
SYM_CODE_END(el0_sync)

#ifdef CONFIG_COMPAT
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_sync_compat_handler
	b	ret_to_user
SYM_CODE_END(el0_sync_compat)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
	kernel_entry 0, 32
	b	el0_irq_naked
SYM_CODE_END(el0_irq_compat)

SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
	kernel_entry 0, 32
	b	el0_fiq_naked
SYM_CODE_END(el0_fiq_compat)

SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
	kernel_entry 0, 32
	b	el0_error_naked
SYM_CODE_END(el0_error_compat)
#endif
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
	kernel_entry 0
el0_irq_naked:
	el0_interrupt_handler handle_arch_irq
	b	ret_to_user
SYM_CODE_END(el0_irq)

SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
	kernel_entry 0
el0_fiq_naked:
	el0_interrupt_handler handle_arch_fiq
	b	ret_to_user
SYM_CODE_END(el0_fiq)
SYM_CODE_START_LOCAL(el1_error)
	kernel_entry 1
	mrs	x1, esr_el1
	gic_prio_kentry_setup tmp=x2
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
SYM_CODE_END(el1_error)

SYM_CODE_START_LOCAL(el0_error)
	kernel_entry 0
el0_error_naked:
	mrs	x25, esr_el1
	gic_prio_kentry_setup tmp=x2
	user_exit_irqoff
	enable_dbg
	mov	x0, sp
	mov	x1, x25
	bl	do_serror
	enable_da_f
	b	ret_to_user
SYM_CODE_END(el0_error)
813 * "slow" syscall return path.
815 SYM_CODE_START_LOCAL(ret_to_user)
817 gic_prio_kentry_setup tmp=x3
818 #ifdef CONFIG_TRACE_IRQFLAGS
819 bl trace_hardirqs_off
821 ldr x19, [tsk, #TSK_TI_FLAGS]
822 and x2, x19, #_TIF_WORK_MASK
823 cbnz x2, work_pending
826 /* Ignore asynchronous tag check faults in the uaccess routines */
828 enable_step_tsk x19, x2
829 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
835 * Ok, we need to do extra processing, enter the slow path.
841 ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
843 SYM_CODE_END(ret_to_user)
	.popsection				// .entry.text
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm
	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm
	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
alternative_else_nop_endif
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm
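/*
 * How the ret-based entry works: "bl 2f" pushes a dummy entry onto the
 * CPU's return stack, and the final "ret" consumes it with a target we
 * computed ourselves (the full vectors base plus the 1b - tramp_vectors
 * slot offset). An attacker who trains the branch predictor against the
 * trampoline therefore cannot steer the transition into the kernel
 * vectors, which is the branch-aliasing defence the comment describes.
 */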
	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	sb
	.endm

	.align	11
SYM_CODE_START_NOALIGN(tramp_vectors)
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
	tramp_exit
SYM_CODE_END(tramp_exit_native)

SYM_CODE_START(tramp_exit_compat)
	tramp_exit	32
SYM_CODE_END(tramp_exit_compat)
	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
	.quad	vectors
SYM_DATA_END(__entry_tramp_data_start)
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0, x8
	scs_load x1, x8
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
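/*
 * cpu_context layout, as written above: consecutive 16-byte pairs starting
 * at THREAD_CPU_CONTEXT within task_struct: {x19,x20} ... {x27,x28},
 * {fp,sp}, then pc (stored from lr). The restore side walks the next
 * task's block in the same order, so after the final "ret" execution
 * resumes wherever the next task last called cpu_switch_to.
 */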
/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	2f
	smc	#0
	b	.
2:	hvc	#0
	b	.
	.endm
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Remember whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]

#ifdef CONFIG_RANDOMIZE_BASE
	adr	x4, tramp_vectors + PAGE_SIZE
	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
	ldr	x4, [x4]
#else
	ldr	x4, =__sdei_asm_handler
#endif
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.popsection		// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
SYM_DATA_START(__sdei_asm_trampoline_next_handler)
	.quad	__sdei_asm_handler
SYM_DATA_END(__sdei_asm_trampoline_next_handler)
.popsection		// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Software Delegated Exception entry point.
 *
 * x0: event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * return, or may be overwritten by exit calls.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
#endif
#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif
#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif
	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp
	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1
	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif
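/*
 * Selection sketch: after "cmp x0, #1", csel with the "ls" (unsigned
 * lower-or-same) condition picks EVENT_COMPLETE when the handler returned
 * 0 or 1 and EVENT_COMPLETE_AND_RESUME otherwise, matching the comment
 * above; x1 already holds the address to resume at in the latter case.
 */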
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */