/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking and irqflag tracing need to instrument transitions between
 * user and kernel mode.
 */
	.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
alternative_else_nop_endif
#endif

	sub	sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\el\ht\()_\regsize\()_\label
0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */
	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
	b	el\el\ht\()_\regsize\()_\label
#endif
	.endm
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm
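	/*
	 * Note: TRAMP_VALIAS is the fixed-map alias of .entry.tramp.text that
	 * remains mapped while the rest of the kernel is hidden from EL0, so
	 * \dst ends up holding \sym's alias address rather than its kernel VA.
	 */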
/*
 * This macro corrupts x0-x3. It is the caller's duty to save/restore
 * them if required.
 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	spectre_v4_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm
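	/*
	 * Note: the ARM_SMCCC_ARCH_WORKAROUND_2 call asks firmware to toggle
	 * the Speculative Store Bypass mitigation; \state is 1 on kernel entry
	 * and 0 on kernel exit. The call is skipped when this CPU requires no
	 * firmware callback or when the task's TIF_SSBD flag indicates the
	 * state should not be flipped on entry/exit.
	 */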
	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm
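	/*
	 * Note: TFSRE0_EL1.TF0 accumulates asynchronous tag check faults for
	 * EL0 accesses; latching it into _TIF_MTE_ASYNC_FAULT here lets the
	 * fault be reported to the task on the return-to-user path.
	 */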
	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm
	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm
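	/*
	 * Note: GCR_EL1 holds the tag exclude mask consulted by IRG/ADDG tag
	 * generation; SYS_GCR_EL1_RRND additionally asks the CPU to pick its
	 * own random tags within that mask.
	 */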
	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	ldr_l	\tmp, gcr_kernel_excl
	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm
	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_ARM64_MTE
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]
	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
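	/*
	 * Note: the 16 * n offsets above land in pt_regs->regs[0..29]; lr, the
	 * aborted SP and the ELR/SPSR pair are saved separately below at
	 * S_LR and S_PC.
	 */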
	.if	\el == 0
	ldr_this_cpu	tsk, __entry_task, x20

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug
	 * exceptions when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif
	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * For exceptions from EL0, create a final frame record.
	 * For exceptions from EL1, create a synthetic frame record so the
	 * interrupted code shows up in the backtrace.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME
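	/*
	 * Note: with x29 pointing at the stackframe embedded in pt_regs, the
	 * unwinder either terminates here (EL0 entry: the xzr/xzr pair) or
	 * chains into the interrupted EL1 context via its x29 and ELR.
	 */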
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif
#endif
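	/*
	 * Note: with pseudo-NMI, the interrupted priority mask is saved into
	 * pt_regs and PMR is reset to GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET,
	 * mirroring the fact that PSTATE.I is set on entry; kernel_exit
	 * restores the value stashed at S_PMR_SAVE.
	 */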
	/* Re-enable tag checking (TCO set on exception entry) */
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	SET_PSTATE_TCO(0)
alternative_else_nop_endif
#endif
	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
	.macro	kernel_exit, el

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif
#endif
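	/*
	 * Note: the dsb is only required when ICC_CTLR_EL1.PMHE (PMR hint
	 * enable) is set, i.e. when the GIC uses PMR as a hint for forwarding
	 * interrupts; otherwise the priority change need not be visible to
	 * the redistributor before eret.
	 */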
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	ldr	x23, [sp, #S_SP]		// load return stack pointer
	tst	x22, #PSR_MODE32_BIT		// native task?

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	x30, tramp_exit_native
	br	x30
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endm
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)
SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif
/* GPRs used by entry code */
tsk	.req	x28		// current thread_info
/*
 * Interrupt handling.
 */
	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm

	.pushsection ".entry.text", "ax"
SYM_CODE_START(vectors)
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
	kernel_ventry	0, t, 64, error		// Error 64-bit EL0

	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
SYM_CODE_END(vectors)
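/*
 * Note: the table above follows the architectural layout: four groups
 * (current EL with SP_EL0, current EL with SP_ELx, lower EL AArch64, lower EL
 * AArch32) of four entries (sync, IRQ, FIQ, SError). Each kernel_ventry
 * expands to one 128-byte-aligned entry (the .align 7 elided from this
 * listing).
 */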
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
SYM_CODE_START_LOCAL(__bad_stack)
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp
SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */
	.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
	kernel_entry \el, \regsize
	mov	x0, sp
	bl	el\el\ht\()_\regsize\()_\label\()_handler
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
	.endm
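/*
 * Note: each entry_handler invocation below generates one of the vector
 * targets named by kernel_ventry (e.g. el0t_64_sync), which saves state with
 * kernel_entry and calls the matching C handler (el0t_64_sync_handler etc.)
 * before returning via ret_to_user or ret_to_kernel (elided here).
 */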
/*
 * Early exception handlers
 */
	entry_handler	1, t, 64, sync
	entry_handler	1, t, 64, irq
	entry_handler	1, t, 64, fiq
	entry_handler	1, t, 64, error

	entry_handler	1, h, 64, sync
	entry_handler	1, h, 64, irq
	entry_handler	1, h, 64, fiq
	entry_handler	1, h, 64, error

	entry_handler	0, t, 64, sync
	entry_handler	0, t, 64, irq
	entry_handler	0, t, 64, fiq
	entry_handler	0, t, 64, error

	entry_handler	0, t, 32, sync
	entry_handler	0, t, 32, irq
	entry_handler	0, t, 32, fiq
	entry_handler	0, t, 32, error
SYM_CODE_START_LOCAL(ret_to_kernel)
	kernel_exit 1
SYM_CODE_END(ret_to_kernel)
/*
 * "slow" syscall return path.
 */
SYM_CODE_START_LOCAL(ret_to_user)
	gic_prio_kentry_setup tmp=x3
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	and	x2, x19, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase
#endif
	kernel_exit 0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
SYM_CODE_END(ret_to_user)
	.popsection				// .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"
	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm
	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm
	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
alternative_else_nop_endif
	add	x30, x30, #(1b - tramp_vectors)
	ret
	.endm
	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.endm

SYM_CODE_START_NOALIGN(tramp_vectors)
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
	tramp_exit
SYM_CODE_END(tramp_exit_native)

SYM_CODE_START(tramp_exit_compat)
	tramp_exit	32
SYM_CODE_END(tramp_exit_compat)

	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
SYM_DATA_START(__entry_tramp_data_start)
	.quad	vectors
SYM_DATA_END(__entry_tramp_data_start)
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
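/*
 * Note: THREAD_CPU_CONTEXT is the offset of thread.cpu_context within
 * task_struct. prev's callee-saved registers, fp, sp and return address are
 * saved there and next's are reloaded, so cpu_switch_to "returns" on next's
 * stack at the point where next last called it. SP is staged through x9
 * because the stp/ldp data registers cannot name SP directly.
 */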
/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
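/*
 * Note: x19 and x20 are expected to have been set up by copy_thread(): for a
 * kernel thread x19 is the thread function and x20 its argument; for a user
 * task x19 is zero and we drop straight through to ret_to_user.
 */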
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 */
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
	stp	scs_sp, xzr, [sp, #-16]!
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif

	/* Create a frame record to save our LR and SP (implicit in FP) */
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	ldr_this_cpu x16, irq_stack_ptr, x17
	mov	x15, #IRQ_STACK_SIZE
	add	x16, x16, x15

	/* Move to the new stack and call the function there */
	mov	sp, x16
	blr	x1

	/*
	 * Restore the SP from the FP, and restore the FP and LR from the frame
	 * record.
	 */
	mov	sp, x29
	ldp	x29, x30, [sp], #16
#ifdef CONFIG_SHADOW_CALL_STACK
	ldp	scs_sp, xzr, [sp], #16
#endif
	ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)
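/*
 * Note: irq_stack_ptr points at the base of this CPU's IRQ stack, so adding
 * IRQ_STACK_SIZE yields the initial (highest) SP for the full-descending
 * stack on which func(regs) is then called.
 */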
#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	2f
	smc	#0
	b	.
2:	hvc	#0
	b	.
	.endm
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Remember whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]

#ifdef CONFIG_RANDOMIZE_BASE
	adr	x4, tramp_vectors + PAGE_SIZE
	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
	ldr	x4, [x4]
#else
	ldr	x4, =__sdei_asm_handler
#endif
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.popsection		// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
SYM_DATA_START(__sdei_asm_trampoline_next_handler)
	.quad	__sdei_asm_handler
SYM_DATA_END(__sdei_asm_trampoline_next_handler)
.popsection		// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * have clobbered them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1
#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
#endif

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif
	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel point to the previous stack/frame. */
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp
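	/*
	 * Note: this pushes a synthetic frame record so the unwinder can step
	 * from the SDEI handler into the interrupted context. The eq condition
	 * used by the csels above comes from an elided comparison of the
	 * interrupted PSTATE's EL against CurrentEL; fp/elr are zeroed when we
	 * interrupted userspace or a guest.
	 */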
	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19			// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls
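	/*
	 * Note: per the comparison above, a handler return value of 0 or 1
	 * (handled/failed) selects SDEI_EVENT_COMPLETE, while anything larger
	 * is treated as a new PC and selects SDEI_EVENT_COMPLETE_AND_RESUME,
	 * with x1 carrying the resume address.
	 */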
	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */