// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */
#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/sysreg.h>
21 * This is intended to match the logic in irqentry_enter(), handling the kernel
22 * mode transitions only.
24 static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
26 regs->exit_rcu = false;
28 if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
29 lockdep_hardirqs_off(CALLER_ADDR0);
31 trace_hardirqs_off_finish();
33 regs->exit_rcu = true;
37 lockdep_hardirqs_off(CALLER_ADDR0);
38 rcu_irq_enter_check_tick();
39 trace_hardirqs_off_finish();
41 mte_check_tfsr_entry();
45 * This is intended to match the logic in irqentry_exit(), handling the kernel
46 * mode transitions only, and with preemption handled elsewhere.
48 static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
50 lockdep_assert_irqs_disabled();
52 mte_check_tfsr_exit();
54 if (interrupts_enabled(regs)) {
56 trace_hardirqs_on_prepare();
57 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
59 lockdep_hardirqs_on(CALLER_ADDR0);
70 void noinstr arm64_enter_nmi(struct pt_regs *regs)
72 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
75 lockdep_hardirqs_off(CALLER_ADDR0);
76 lockdep_hardirq_enter();
79 trace_hardirqs_off_finish();
83 void noinstr arm64_exit_nmi(struct pt_regs *regs)
85 bool restore = regs->lockdep_hardirqs;
89 trace_hardirqs_on_prepare();
90 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
94 lockdep_hardirq_exit();
96 lockdep_hardirqs_on(CALLER_ADDR0);
100 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
102 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
103 arm64_enter_nmi(regs);
105 enter_from_kernel_mode(regs);
108 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
110 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
111 arm64_exit_nmi(regs);
113 exit_to_kernel_mode(regs);
116 #ifdef CONFIG_ARM64_ERRATUM_1463225
117 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
119 static void cortex_a76_erratum_1463225_svc_handler(void)
123 if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
126 if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
129 __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
130 reg = read_sysreg(mdscr_el1);
131 val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
132 write_sysreg(val, mdscr_el1);
133 asm volatile("msr daifclr, #8");
136 /* We will have taken a single-step exception by this point */
138 write_sysreg(reg, mdscr_el1);
139 __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
142 static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
144 if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
148 * We've taken a dummy step exception from the kernel to ensure
149 * that interrupts are re-enabled on the syscall path. Return back
150 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
151 * masked so that we can safely restore the mdscr and get on with
152 * handling the syscall.
154 regs->pstate |= PSR_D_BIT;
157 #else /* CONFIG_ARM64_ERRATUM_1463225 */
158 static void cortex_a76_erratum_1463225_svc_handler(void) { }
159 static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
163 #endif /* CONFIG_ARM64_ERRATUM_1463225 */
165 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
167 unsigned long far = read_sysreg(far_el1);
169 enter_from_kernel_mode(regs);
170 local_daif_inherit(regs);
171 do_mem_abort(far, esr, regs);
173 exit_to_kernel_mode(regs);
176 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
178 unsigned long far = read_sysreg(far_el1);
180 enter_from_kernel_mode(regs);
181 local_daif_inherit(regs);
182 do_sp_pc_abort(far, esr, regs);
184 exit_to_kernel_mode(regs);
187 static void noinstr el1_undef(struct pt_regs *regs)
189 enter_from_kernel_mode(regs);
190 local_daif_inherit(regs);
193 exit_to_kernel_mode(regs);
196 static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
198 enter_from_kernel_mode(regs);
199 local_daif_inherit(regs);
200 bad_mode(regs, 0, esr);
202 exit_to_kernel_mode(regs);
205 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
207 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
209 lockdep_hardirqs_off(CALLER_ADDR0);
212 trace_hardirqs_off_finish();
215 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
217 bool restore = regs->lockdep_hardirqs;
220 trace_hardirqs_on_prepare();
221 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
226 lockdep_hardirqs_on(CALLER_ADDR0);
229 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
231 unsigned long far = read_sysreg(far_el1);
233 arm64_enter_el1_dbg(regs);
234 if (!cortex_a76_erratum_1463225_debug_handler(regs))
235 do_debug_exception(far, esr, regs);
236 arm64_exit_el1_dbg(regs);
239 static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
241 enter_from_kernel_mode(regs);
242 local_daif_inherit(regs);
243 do_ptrauth_fault(regs, esr);
245 exit_to_kernel_mode(regs);
248 asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
250 unsigned long esr = read_sysreg(esr_el1);
252 switch (ESR_ELx_EC(esr)) {
253 case ESR_ELx_EC_DABT_CUR:
254 case ESR_ELx_EC_IABT_CUR:
255 el1_abort(regs, esr);
258 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
259 * recursive exception when trying to push the initial pt_regs.
261 case ESR_ELx_EC_PC_ALIGN:
264 case ESR_ELx_EC_SYS64:
265 case ESR_ELx_EC_UNKNOWN:
268 case ESR_ELx_EC_BREAKPT_CUR:
269 case ESR_ELx_EC_SOFTSTP_CUR:
270 case ESR_ELx_EC_WATCHPT_CUR:
271 case ESR_ELx_EC_BRK64:
274 case ESR_ELx_EC_FPAC:
282 asmlinkage void noinstr enter_from_user_mode(void)
284 lockdep_hardirqs_off(CALLER_ADDR0);
285 CT_WARN_ON(ct_state() != CONTEXT_USER);
287 trace_hardirqs_off_finish();
290 asmlinkage void noinstr exit_to_user_mode(void)
292 mte_check_tfsr_exit();
294 trace_hardirqs_on_prepare();
295 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
297 lockdep_hardirqs_on(CALLER_ADDR0);
300 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
302 unsigned long far = read_sysreg(far_el1);
304 enter_from_user_mode();
305 local_daif_restore(DAIF_PROCCTX);
306 do_mem_abort(far, esr, regs);
309 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
311 unsigned long far = read_sysreg(far_el1);
314 * We've taken an instruction abort from userspace and not yet
315 * re-enabled IRQs. If the address is a kernel address, apply
316 * BP hardening prior to enabling IRQs and pre-emption.
318 if (!is_ttbr0_addr(far))
319 arm64_apply_bp_hardening();
321 enter_from_user_mode();
322 local_daif_restore(DAIF_PROCCTX);
323 do_mem_abort(far, esr, regs);
326 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
328 enter_from_user_mode();
329 local_daif_restore(DAIF_PROCCTX);
330 do_fpsimd_acc(esr, regs);
333 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
335 enter_from_user_mode();
336 local_daif_restore(DAIF_PROCCTX);
337 do_sve_acc(esr, regs);
340 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
342 enter_from_user_mode();
343 local_daif_restore(DAIF_PROCCTX);
344 do_fpsimd_exc(esr, regs);
347 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
349 enter_from_user_mode();
350 local_daif_restore(DAIF_PROCCTX);
351 do_sysinstr(esr, regs);
354 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
356 unsigned long far = read_sysreg(far_el1);
358 if (!is_ttbr0_addr(instruction_pointer(regs)))
359 arm64_apply_bp_hardening();
361 enter_from_user_mode();
362 local_daif_restore(DAIF_PROCCTX);
363 do_sp_pc_abort(far, esr, regs);
366 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
368 enter_from_user_mode();
369 local_daif_restore(DAIF_PROCCTX);
370 do_sp_pc_abort(regs->sp, esr, regs);
373 static void noinstr el0_undef(struct pt_regs *regs)
375 enter_from_user_mode();
376 local_daif_restore(DAIF_PROCCTX);
380 static void noinstr el0_bti(struct pt_regs *regs)
382 enter_from_user_mode();
383 local_daif_restore(DAIF_PROCCTX);
387 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
389 enter_from_user_mode();
390 local_daif_restore(DAIF_PROCCTX);
391 bad_el0_sync(regs, 0, esr);
394 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
396 /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
397 unsigned long far = read_sysreg(far_el1);
399 enter_from_user_mode();
400 do_debug_exception(far, esr, regs);
401 local_daif_restore(DAIF_PROCCTX_NOIRQ);
404 static void noinstr el0_svc(struct pt_regs *regs)
406 enter_from_user_mode();
407 cortex_a76_erratum_1463225_svc_handler();
411 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
413 enter_from_user_mode();
414 local_daif_restore(DAIF_PROCCTX);
415 do_ptrauth_fault(regs, esr);
418 asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
420 unsigned long esr = read_sysreg(esr_el1);
422 switch (ESR_ELx_EC(esr)) {
423 case ESR_ELx_EC_SVC64:
426 case ESR_ELx_EC_DABT_LOW:
429 case ESR_ELx_EC_IABT_LOW:
432 case ESR_ELx_EC_FP_ASIMD:
433 el0_fpsimd_acc(regs, esr);
436 el0_sve_acc(regs, esr);
438 case ESR_ELx_EC_FP_EXC64:
439 el0_fpsimd_exc(regs, esr);
441 case ESR_ELx_EC_SYS64:
445 case ESR_ELx_EC_SP_ALIGN:
448 case ESR_ELx_EC_PC_ALIGN:
451 case ESR_ELx_EC_UNKNOWN:
457 case ESR_ELx_EC_BREAKPT_LOW:
458 case ESR_ELx_EC_SOFTSTP_LOW:
459 case ESR_ELx_EC_WATCHPT_LOW:
460 case ESR_ELx_EC_BRK64:
463 case ESR_ELx_EC_FPAC:
472 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
474 enter_from_user_mode();
475 local_daif_restore(DAIF_PROCCTX);
476 do_cp15instr(esr, regs);
479 static void noinstr el0_svc_compat(struct pt_regs *regs)
481 enter_from_user_mode();
482 cortex_a76_erratum_1463225_svc_handler();
483 do_el0_svc_compat(regs);
486 asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
488 unsigned long esr = read_sysreg(esr_el1);
490 switch (ESR_ELx_EC(esr)) {
491 case ESR_ELx_EC_SVC32:
492 el0_svc_compat(regs);
494 case ESR_ELx_EC_DABT_LOW:
497 case ESR_ELx_EC_IABT_LOW:
500 case ESR_ELx_EC_FP_ASIMD:
501 el0_fpsimd_acc(regs, esr);
503 case ESR_ELx_EC_FP_EXC32:
504 el0_fpsimd_exc(regs, esr);
506 case ESR_ELx_EC_PC_ALIGN:
509 case ESR_ELx_EC_UNKNOWN:
510 case ESR_ELx_EC_CP14_MR:
511 case ESR_ELx_EC_CP14_LS:
512 case ESR_ELx_EC_CP14_64:
515 case ESR_ELx_EC_CP15_32:
516 case ESR_ELx_EC_CP15_64:
519 case ESR_ELx_EC_BREAKPT_LOW:
520 case ESR_ELx_EC_SOFTSTP_LOW:
521 case ESR_ELx_EC_WATCHPT_LOW:
522 case ESR_ELx_EC_BKPT32:
529 #endif /* CONFIG_COMPAT */