// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */
#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>
21 * This is intended to match the logic in irqentry_enter(), handling the kernel
22 * mode transitions only.
24 static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
26 regs->exit_rcu = false;
28 if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
29 lockdep_hardirqs_off(CALLER_ADDR0);
31 trace_hardirqs_off_finish();
33 regs->exit_rcu = true;
37 lockdep_hardirqs_off(CALLER_ADDR0);
38 rcu_irq_enter_check_tick();
39 trace_hardirqs_off_finish();
41 mte_check_tfsr_entry();
45 * This is intended to match the logic in irqentry_exit(), handling the kernel
46 * mode transitions only, and with preemption handled elsewhere.
48 static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
50 lockdep_assert_irqs_disabled();
52 mte_check_tfsr_exit();
54 if (interrupts_enabled(regs)) {
56 trace_hardirqs_on_prepare();
57 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
59 lockdep_hardirqs_on(CALLER_ADDR0);
70 void noinstr arm64_enter_nmi(struct pt_regs *regs)
72 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
75 lockdep_hardirqs_off(CALLER_ADDR0);
76 lockdep_hardirq_enter();
79 trace_hardirqs_off_finish();
83 void noinstr arm64_exit_nmi(struct pt_regs *regs)
85 bool restore = regs->lockdep_hardirqs;
89 trace_hardirqs_on_prepare();
90 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
94 lockdep_hardirq_exit();
96 lockdep_hardirqs_on(CALLER_ADDR0);
100 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
102 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
103 arm64_enter_nmi(regs);
105 enter_from_kernel_mode(regs);
108 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
110 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
111 arm64_exit_nmi(regs);
113 exit_to_kernel_mode(regs);
116 #ifdef CONFIG_ARM64_ERRATUM_1463225
117 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
119 static void cortex_a76_erratum_1463225_svc_handler(void)
123 if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
126 if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
129 __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
130 reg = read_sysreg(mdscr_el1);
131 val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
132 write_sysreg(val, mdscr_el1);
133 asm volatile("msr daifclr, #8");
136 /* We will have taken a single-step exception by this point */
138 write_sysreg(reg, mdscr_el1);
139 __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
142 static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
144 if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
148 * We've taken a dummy step exception from the kernel to ensure
149 * that interrupts are re-enabled on the syscall path. Return back
150 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
151 * masked so that we can safely restore the mdscr and get on with
152 * handling the syscall.
154 regs->pstate |= PSR_D_BIT;
157 #else /* CONFIG_ARM64_ERRATUM_1463225 */
158 static void cortex_a76_erratum_1463225_svc_handler(void) { }
159 static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
163 #endif /* CONFIG_ARM64_ERRATUM_1463225 */
165 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
167 unsigned long far = read_sysreg(far_el1);
169 enter_from_kernel_mode(regs);
170 local_daif_inherit(regs);
171 do_mem_abort(far, esr, regs);
173 exit_to_kernel_mode(regs);
176 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
178 unsigned long far = read_sysreg(far_el1);
180 enter_from_kernel_mode(regs);
181 local_daif_inherit(regs);
182 do_sp_pc_abort(far, esr, regs);
184 exit_to_kernel_mode(regs);
187 static void noinstr el1_undef(struct pt_regs *regs)
189 enter_from_kernel_mode(regs);
190 local_daif_inherit(regs);
193 exit_to_kernel_mode(regs);
196 static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
198 enter_from_kernel_mode(regs);
199 local_daif_inherit(regs);
200 bad_mode(regs, 0, esr);
202 exit_to_kernel_mode(regs);
205 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
207 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
209 lockdep_hardirqs_off(CALLER_ADDR0);
212 trace_hardirqs_off_finish();
215 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
217 bool restore = regs->lockdep_hardirqs;
220 trace_hardirqs_on_prepare();
221 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
226 lockdep_hardirqs_on(CALLER_ADDR0);
229 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
231 unsigned long far = read_sysreg(far_el1);
234 * The CPU masked interrupts, and we are leaving them masked during
235 * do_debug_exception(). Update PMR as if we had called
238 if (system_uses_irq_prio_masking())
239 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
241 arm64_enter_el1_dbg(regs);
242 if (!cortex_a76_erratum_1463225_debug_handler(regs))
243 do_debug_exception(far, esr, regs);
244 arm64_exit_el1_dbg(regs);
247 static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
249 enter_from_kernel_mode(regs);
250 local_daif_inherit(regs);
251 do_ptrauth_fault(regs, esr);
253 exit_to_kernel_mode(regs);
256 asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
258 unsigned long esr = read_sysreg(esr_el1);
260 switch (ESR_ELx_EC(esr)) {
261 case ESR_ELx_EC_DABT_CUR:
262 case ESR_ELx_EC_IABT_CUR:
263 el1_abort(regs, esr);
266 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
267 * recursive exception when trying to push the initial pt_regs.
269 case ESR_ELx_EC_PC_ALIGN:
272 case ESR_ELx_EC_SYS64:
273 case ESR_ELx_EC_UNKNOWN:
276 case ESR_ELx_EC_BREAKPT_CUR:
277 case ESR_ELx_EC_SOFTSTP_CUR:
278 case ESR_ELx_EC_WATCHPT_CUR:
279 case ESR_ELx_EC_BRK64:
282 case ESR_ELx_EC_FPAC:
290 asmlinkage void noinstr enter_from_user_mode(void)
292 lockdep_hardirqs_off(CALLER_ADDR0);
293 CT_WARN_ON(ct_state() != CONTEXT_USER);
295 trace_hardirqs_off_finish();
298 asmlinkage void noinstr exit_to_user_mode(void)
300 mte_check_tfsr_exit();
302 trace_hardirqs_on_prepare();
303 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
305 lockdep_hardirqs_on(CALLER_ADDR0);
308 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
310 unsigned long far = read_sysreg(far_el1);
312 enter_from_user_mode();
313 local_daif_restore(DAIF_PROCCTX);
314 do_mem_abort(far, esr, regs);
317 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
319 unsigned long far = read_sysreg(far_el1);
322 * We've taken an instruction abort from userspace and not yet
323 * re-enabled IRQs. If the address is a kernel address, apply
324 * BP hardening prior to enabling IRQs and pre-emption.
326 if (!is_ttbr0_addr(far))
327 arm64_apply_bp_hardening();
329 enter_from_user_mode();
330 local_daif_restore(DAIF_PROCCTX);
331 do_mem_abort(far, esr, regs);
334 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
336 enter_from_user_mode();
337 local_daif_restore(DAIF_PROCCTX);
338 do_fpsimd_acc(esr, regs);
341 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
343 enter_from_user_mode();
344 local_daif_restore(DAIF_PROCCTX);
345 do_sve_acc(esr, regs);
348 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
350 enter_from_user_mode();
351 local_daif_restore(DAIF_PROCCTX);
352 do_fpsimd_exc(esr, regs);
355 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
357 enter_from_user_mode();
358 local_daif_restore(DAIF_PROCCTX);
359 do_sysinstr(esr, regs);
362 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
364 unsigned long far = read_sysreg(far_el1);
366 if (!is_ttbr0_addr(instruction_pointer(regs)))
367 arm64_apply_bp_hardening();
369 enter_from_user_mode();
370 local_daif_restore(DAIF_PROCCTX);
371 do_sp_pc_abort(far, esr, regs);
374 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
376 enter_from_user_mode();
377 local_daif_restore(DAIF_PROCCTX);
378 do_sp_pc_abort(regs->sp, esr, regs);
381 static void noinstr el0_undef(struct pt_regs *regs)
383 enter_from_user_mode();
384 local_daif_restore(DAIF_PROCCTX);
388 static void noinstr el0_bti(struct pt_regs *regs)
390 enter_from_user_mode();
391 local_daif_restore(DAIF_PROCCTX);
395 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
397 enter_from_user_mode();
398 local_daif_restore(DAIF_PROCCTX);
399 bad_el0_sync(regs, 0, esr);
402 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
404 /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
405 unsigned long far = read_sysreg(far_el1);
407 if (system_uses_irq_prio_masking())
408 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
410 enter_from_user_mode();
411 do_debug_exception(far, esr, regs);
412 local_daif_restore(DAIF_PROCCTX_NOIRQ);
415 static void noinstr el0_svc(struct pt_regs *regs)
417 if (system_uses_irq_prio_masking())
418 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
420 enter_from_user_mode();
421 cortex_a76_erratum_1463225_svc_handler();
425 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
427 enter_from_user_mode();
428 local_daif_restore(DAIF_PROCCTX);
429 do_ptrauth_fault(regs, esr);
432 asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
434 unsigned long esr = read_sysreg(esr_el1);
436 switch (ESR_ELx_EC(esr)) {
437 case ESR_ELx_EC_SVC64:
440 case ESR_ELx_EC_DABT_LOW:
443 case ESR_ELx_EC_IABT_LOW:
446 case ESR_ELx_EC_FP_ASIMD:
447 el0_fpsimd_acc(regs, esr);
450 el0_sve_acc(regs, esr);
452 case ESR_ELx_EC_FP_EXC64:
453 el0_fpsimd_exc(regs, esr);
455 case ESR_ELx_EC_SYS64:
459 case ESR_ELx_EC_SP_ALIGN:
462 case ESR_ELx_EC_PC_ALIGN:
465 case ESR_ELx_EC_UNKNOWN:
471 case ESR_ELx_EC_BREAKPT_LOW:
472 case ESR_ELx_EC_SOFTSTP_LOW:
473 case ESR_ELx_EC_WATCHPT_LOW:
474 case ESR_ELx_EC_BRK64:
477 case ESR_ELx_EC_FPAC:
486 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
488 enter_from_user_mode();
489 local_daif_restore(DAIF_PROCCTX);
490 do_cp15instr(esr, regs);
493 static void noinstr el0_svc_compat(struct pt_regs *regs)
495 if (system_uses_irq_prio_masking())
496 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
498 enter_from_user_mode();
499 cortex_a76_erratum_1463225_svc_handler();
500 do_el0_svc_compat(regs);
503 asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
505 unsigned long esr = read_sysreg(esr_el1);
507 switch (ESR_ELx_EC(esr)) {
508 case ESR_ELx_EC_SVC32:
509 el0_svc_compat(regs);
511 case ESR_ELx_EC_DABT_LOW:
514 case ESR_ELx_EC_IABT_LOW:
517 case ESR_ELx_EC_FP_ASIMD:
518 el0_fpsimd_acc(regs, esr);
520 case ESR_ELx_EC_FP_EXC32:
521 el0_fpsimd_exc(regs, esr);
523 case ESR_ELx_EC_PC_ALIGN:
526 case ESR_ELx_EC_UNKNOWN:
527 case ESR_ELx_EC_CP14_MR:
528 case ESR_ELx_EC_CP14_LS:
529 case ESR_ELx_EC_CP14_64:
532 case ESR_ELx_EC_CP15_32:
533 case ESR_ELx_EC_CP15_64:
536 case ESR_ELx_EC_BREAKPT_LOW:
537 case ESR_ELx_EC_SOFTSTP_LOW:
538 case ESR_ELx_EC_WATCHPT_LOW:
539 case ESR_ELx_EC_BKPT32:
546 #endif /* CONFIG_COMPAT */