// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}
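
/*
 * NMI entry/exit bookkeeping, used for pseudo-NMIs and other NMI-like
 * exceptions: the previous lockdep hardirq state is stashed in pt_regs on
 * entry and conditionally restored on exit.
 */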

void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}
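
/*
 * Cortex-A76 erratum 1463225 workaround: when a task is being single-stepped
 * over an SVC, the SVC handler below deliberately takes a dummy single-step
 * exception from the kernel (by setting MDSCR_EL1.SS/KDE and unmasking debug
 * exceptions) before the real syscall work, and the debug handler re-masks
 * debug via PSTATE.D so MDSCR_EL1 can be restored safely.
 */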
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;
	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
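
/*
 * The EL1 handlers below follow a common shape: enter_from_kernel_mode() does
 * the lockdep/RCU/tracing accounting, local_daif_inherit() restores the
 * interrupt masking state the exception interrupted, the fault is handled,
 * and local_daif_mask() + exit_to_kernel_mode() undo the accounting before
 * returning to the interrupted context.
 */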

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
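
/*
 * Debug exceptions taken from EL1 are treated much like NMIs for lockdep and
 * RCU: the prior hardirq state is saved in pt_regs and RCU is told we are in
 * an NMI-like context for the duration of the handler.
 */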

static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * The CPU masked interrupts, and we are leaving them masked during
	 * do_debug_exception(). Update PMR as if we had called
	 * local_daif_mask().
	 */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
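
/*
 * el1_sync_handler() is the C entry point for synchronous exceptions taken
 * from EL1; it dispatches on the ESR_EL1 exception class to the per-class
 * handlers above.
 */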

asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}
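
/*
 * enter_from_user_mode() and exit_to_user_mode() bracket work done on behalf
 * of a userspace exception: they switch context tracking between CONTEXT_USER
 * and the kernel and keep lockdep/tracing informed, broadly mirroring the
 * generic entry code's user enter/exit hooks.
 */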

asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
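
/*
 * The EL0 handlers are simpler than their EL1 counterparts: after
 * enter_from_user_mode(), local_daif_restore(DAIF_PROCCTX) re-enables
 * interrupts for process context before the fault is handled; the return to
 * userspace path performs the matching exit accounting.
 */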

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}
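
/*
 * el0_sync_handler() dispatches synchronous exceptions taken from 64-bit
 * userspace on the ESR_EL1 exception class, in the same way as the EL1
 * handler above.
 */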

asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
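
/*
 * The remaining handlers deal with synchronous exceptions taken from 32-bit
 * (AArch32) userspace when CONFIG_COMPAT is enabled.
 */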
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
}

asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
#endif /* CONFIG_COMPAT */