/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit_irqoff
#ifdef CONFIG_CONTEXT_TRACKING
	bl	enter_from_user_mode
#endif
	.endm
	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
alternative_else_nop_endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0'' = (sp + x0) - x0 = sp
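	/*
	 * Worked example of the sequence above: on entry x0 still holds the
	 * interrupted context's x0 and sp has already had S_FRAME_SIZE
	 * subtracted.  "add sp, sp, x0" temporarily folds x0 into sp so that
	 * "sub x0, sp, x0" can recover the decremented sp into x0 without
	 * touching any other GPR.  If bit THREAD_SHIFT of that value is set,
	 * sp is no longer inside a properly aligned task/IRQ stack, i.e. the
	 * frame reservation overflowed the stack, and we branch to 0f.
	 * Otherwise the last two subtractions undo the shuffle, leaving both
	 * x0 and sp exactly as they were.
	 */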
0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm
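	/*
	 * tramp_alias computes the address of \sym as seen through the
	 * trampoline's fixed virtual alias: TRAMP_VALIAS is where
	 * .entry.tramp.text is mapped for the user-visible page tables, so
	 * adding \sym's offset within that section yields the alias that
	 * remains mapped while the rest of the kernel is unmapped (KPTI).
	 */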
	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	.L__asm_ssbd_skip\@
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
#endif
	.endm
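	/*
	 * apply_ssbd toggles the Spectre-v4 (SSBD) mitigation in firmware:
	 * when the per-CPU callback is required and TIF_SSBD does not say the
	 * call can be skipped, it issues an ARM_SMCCC_ARCH_WORKAROUND_2 call
	 * with \state as the argument.  The nop is patched at boot into an
	 * SMC or HVC #0 instruction depending on the SMCCC conduit.
	 */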
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
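	/*
	 * The stores above populate pt_regs->regs[0..29]; the frame itself
	 * was reserved by the "sub sp, sp, #S_FRAME_SIZE" in kernel_ventry,
	 * so [sp, #16 * n] is the slot for registers x(2n) and x(2n+1).
	 */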
	/*
	 * If we're returning from a 32-bit task on a system affected by
	 * erratum 1418040 then re-enable userspace access to the virtual
	 * counter.
	 */
#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if ARM64_WORKAROUND_1418040
	orr	x0, x0, #2			// ARCH_TIMER_USR_VCT_ACCESS_EN
alternative_else_nop_endif
#endif

	ldr_this_cpu	tsk, __entry_task, x20

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	apply_ssbd 1, x22, x23
	ptrauth_keys_install_kernel tsk, x20, x22, x23

	.else
	add	x21, sp, #S_FRAME_SIZE
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME
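	/*
	 * The frame record stashed at S_STACKFRAME links the interrupted x29
	 * with the exception ELR (EL1 case) so the unwinder can step across
	 * the exception boundary; for entries from EL0 it is zeroed, which
	 * terminates the unwind.  x29 then points at this record.
	 */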
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif
	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
alternative_else_nop_endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
	.macro	kernel_exit, el

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */

alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	ldr	x23, [sp, #S_SP]		// load return stack pointer
	tst	x22, #PSR_MODE32_BIT		// native task?

#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if ARM64_WORKAROUND_1418040
	bic	x0, x0, #2			// ARCH_TIMER_USR_VCT_ACCESS_EN
alternative_else_nop_endif
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif

	/* No kernel C function calls after this as user keys are set. */
	ptrauth_keys_install_user tsk, x0, x1, x2
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	x30, tramp_exit_native

	tramp_alias	x30, tramp_exit_compat
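	/*
	 * With KPTI enabled the "eret" above is patched to a NOP and we fall
	 * through here instead: the elided branches pick tramp_exit_native or
	 * tramp_exit_compat based on the earlier PSR_MODE32_BIT test, compute
	 * its .entry.tramp.text alias via tramp_alias, and branch into the
	 * trampoline, which switches back to the user page tables before the
	 * final eret.
	 */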
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * the user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)
/*
 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
 * adjustment.
 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif /* CONFIG_ARM64_SW_TTBR0_PAN */
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	x24, scs_sp		// preserve the original shadow stack
#endif

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
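	/*
	 * After the eor/and above, x25 is zero exactly when sp and the task
	 * stack base agree in every bit above the THREAD_SIZE-sized stack,
	 * i.e. when we are currently running on the task stack; only in that
	 * case is the switch to the per-CPU IRQ stack performed below.
	 */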
	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE

	/* switch to the irq stack */

#ifdef CONFIG_SHADOW_CALL_STACK
	/* also switch to the irq shadow stack */
	adr_this_cpu scs_sp, irq_shadow_call_stack, x26
#endif
/*
 * The callee-saved regs (x19-x29) should be preserved between
 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
 * uses x20-x23 to store data for later use.
 */
	.macro	irq_stack_exit
	mov	sp, x19
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	scs_sp, x24
#endif
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info
/*
 * Interrupt handling.
 */
	ldr_l	x1, handle_arch_irq

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * Set res to 0 if irqs were unmasked in interrupted context.
	 * Otherwise set res to non-0 value.
	 */
	.macro	test_irqs_unmasked res:req, pmr:req
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	sub	\res, \pmr, #GIC_PRIO_IRQON
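	/*
	 * GIC_PRIO_IRQON is the PMR value with all interrupts unmasked, so
	 * the subtraction leaves \res == 0 exactly when the interrupted
	 * context had IRQs unmasked; on CPUs without priority masking the
	 * alternative's else branch (elided here) simply sets \res to zero.
	 */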
	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
alternative_else_nop_endif
#endif
	.endm

	.macro	gic_prio_irq_setup, pmr:req, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, \tmp
alternative_else_nop_endif
#endif
	.endm
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
SYM_CODE_END(vectors)
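/*
 * Layout note: the architecture requires the vector base (VBAR_EL1) to be
 * 2KB aligned, with four groups of four 128-byte entries: current EL with
 * SP_EL0 (EL1t), current EL with SP_ELx (EL1h), lower EL using AArch64 and
 * lower EL using AArch32.  Each kernel_ventry expansion is padded to one
 * such 128-byte slot by the ".align 7" at the top of the macro.
 */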
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]
	/* Stash the regs for handle_bad_stack */
	mov	x0, sp
#endif /* CONFIG_VMAP_STACK */
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

SYM_CODE_START_LOCAL(el0_sync_invalid)
	inv_entry 0, BAD_SYNC
SYM_CODE_END(el0_sync_invalid)

SYM_CODE_START_LOCAL(el0_irq_invalid)
	inv_entry 0, BAD_IRQ
SYM_CODE_END(el0_irq_invalid)

SYM_CODE_START_LOCAL(el0_fiq_invalid)
	inv_entry 0, BAD_FIQ
SYM_CODE_END(el0_fiq_invalid)

SYM_CODE_START_LOCAL(el0_error_invalid)
	inv_entry 0, BAD_ERROR
SYM_CODE_END(el0_error_invalid)

SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
	inv_entry 0, BAD_FIQ, 32
SYM_CODE_END(el0_fiq_invalid_compat)

SYM_CODE_START_LOCAL(el1_sync_invalid)
	inv_entry 1, BAD_SYNC
SYM_CODE_END(el1_sync_invalid)

SYM_CODE_START_LOCAL(el1_irq_invalid)
	inv_entry 1, BAD_IRQ
SYM_CODE_END(el1_irq_invalid)

SYM_CODE_START_LOCAL(el1_fiq_invalid)
	inv_entry 1, BAD_FIQ
SYM_CODE_END(el1_fiq_invalid)

SYM_CODE_START_LOCAL(el1_error_invalid)
	inv_entry 1, BAD_ERROR
SYM_CODE_END(el1_error_invalid)
SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
	kernel_entry 1
	mov	x0, sp
	bl	el1_sync_handler
	kernel_exit 1
SYM_CODE_END(el1_sync)

SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
	kernel_entry 1
	gic_prio_irq_setup pmr=x20, tmp=x1

#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

#ifdef CONFIG_PREEMPTION
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	/*
	 * DA_F were cleared at start of handling. If anything is set in DAIF,
	 * we come back from an NMI, so skip preemption
	 */
alternative_else_nop_endif
	cbnz	x24, 1f				// preempt count != 0 || NMI return path
	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
1:
#endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * When using IRQ priority masking, we can get spurious interrupts while
	 * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
	 * section with interrupts disabled. Skip tracing in those cases.
	 */
	test_irqs_unmasked	res=x0, pmr=x20
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
#endif
#endif

	kernel_exit 1
SYM_CODE_END(el1_irq)
SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
	kernel_entry 0
	mov	x0, sp
	bl	el0_sync_handler
	b	ret_to_user
SYM_CODE_END(el0_sync)

SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_sync_compat_handler
	b	ret_to_user
SYM_CODE_END(el0_sync_compat)

SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
SYM_CODE_END(el0_irq_compat)
SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
SYM_CODE_END(el0_error_compat)
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
	kernel_entry 0
	gic_prio_irq_setup pmr=x20, tmp=x0

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	bl	do_el0_irq_bp_hardening
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
SYM_CODE_END(el0_irq)
SYM_CODE_START_LOCAL(el1_error)
	kernel_entry 1
	gic_prio_kentry_setup tmp=x2
	bl	do_serror
	kernel_exit 1
SYM_CODE_END(el1_error)

SYM_CODE_START_LOCAL(el0_error)
	kernel_entry 0
	gic_prio_kentry_setup tmp=x2
SYM_CODE_END(el0_error)
759 * "slow" syscall return path.
761 SYM_CODE_START_LOCAL(ret_to_user)
763 gic_prio_kentry_setup tmp=x3
764 ldr x1, [tsk, #TSK_TI_FLAGS]
765 and x2, x1, #_TIF_WORK_MASK
766 cbnz x2, work_pending
768 enable_step_tsk x1, x2
769 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
775 * Ok, we need to do extra processing, enter the slow path.
780 #ifdef CONFIG_TRACE_IRQFLAGS
781 bl trace_hardirqs_on // enabled while in userspace
783 ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
785 SYM_CODE_END(ret_to_user)
787 .popsection // .entry.text
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm
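/*
 * The kernel (swapper) and trampoline page tables are laid out next to each
 * other, so mapping or unmapping the kernel from the trampoline is just a
 * fixed-offset adjustment of TTBR1_EL1 plus toggling USER_ASID_FLAG, which
 * keeps the user and kernel translations under distinct ASIDs.
 */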
	.macro tramp_ventry, regsize = 64
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	ldr	x30, [x30]
#endif
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
alternative_else_nop_endif
	add	x30, x30, #(1b - tramp_vectors)
	ret
	.endm

	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	.endm
SYM_CODE_START_NOALIGN(tramp_vectors)
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
	tramp_exit
SYM_CODE_END(tramp_exit_native)

SYM_CODE_START(tramp_exit_compat)
	tramp_exit	32
SYM_CODE_END(tramp_exit_compat)
	.popsection // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
	.quad	vectors
SYM_DATA_END(__entry_tramp_data_start)
	.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
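/*
 * THREAD_CPU_CONTEXT is the offset of task_struct.thread.cpu_context, a
 * struct cpu_context holding x19-x28, fp (x29), sp and pc.  The stores above
 * fill it for the outgoing task and the loads rebuild it for the incoming
 * one, with lr doubling as the "pc" we return through.
 */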
/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
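/*
 * x19/x20 are set up by copy_thread(): for a kernel thread x19 is the thread
 * function and x20 its argument, while for a user task x19 is zero and we
 * drop straight through to ret_to_user.
 */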
#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

	.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
	.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4

	/*
	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
	 * the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

#ifdef CONFIG_RANDOMIZE_BASE
	adr	x4, tramp_vectors + PAGE_SIZE
	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
	ldr	x4, [x4]
#else
	ldr	x4, =__sdei_asm_handler
#endif
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_unmap_kernel tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)

	.popsection // .entry.tramp.text

#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
SYM_DATA_START(__sdei_asm_trampoline_next_handler)
	.quad	__sdei_asm_handler
SYM_DATA_END(__sdei_asm_trampoline_next_handler)
	.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Software Delegated Exception entry point.
 *
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
#endif

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
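	/*
	 * SDEI events carry a priority of 0 (normal) or 1 (critical), and the
	 * two classes get separate per-CPU stacks since a critical event may
	 * preempt a normal one; the pointer loaded above is the stack base,
	 * so sp ends up at base + SDEI_STACK_SIZE (the elided add/mov).
	 */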
#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
	b	4f
3:	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel point to the previous stack/frame. */
	csel	x29, x29, xzr, eq		// fp, or zero
	csel	x4, x2, xzr, eq			// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp
	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	/* restore regs >x17 that we clobbered */
	mov	x4, x19			// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
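	/*
	 * __sdei_handler() returns 0 or 1 to simply complete the event, or an
	 * address to complete-and-resume at; the (elided) csel picks the
	 * matching SMCCC function ID from x2/x3, with x1 already holding the
	 * resume address for the complete-and-resume case.
	 */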
	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */