// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */
#include <linux/context_tracking.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
        regs->exit_rcu = false;

        if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
                lockdep_hardirqs_off(CALLER_ADDR0);
                rcu_irq_enter();
                trace_hardirqs_off_finish();

                regs->exit_rcu = true;
                return;
        }

        lockdep_hardirqs_off(CALLER_ADDR0);
        rcu_irq_enter_check_tick();
        trace_hardirqs_off_finish();

        mte_check_tfsr_entry();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
        lockdep_assert_irqs_disabled();

        mte_check_tfsr_exit();

        if (interrupts_enabled(regs)) {
                if (regs->exit_rcu) {
                        trace_hardirqs_on_prepare();
                        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
                        rcu_irq_exit();
                        lockdep_hardirqs_on(CALLER_ADDR0);
                        return;
                }

                trace_hardirqs_on();
        } else {
                if (regs->exit_rcu)
                        rcu_irq_exit();
        }
}
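
/*
 * Entry/exit bookkeeping for exceptions handled as NMIs (SErrors and
 * pseudo-NMIs). The interrupted context's hardirq state is stashed in
 * pt_regs so the matching exit can restore it, and lockdep, RCU and
 * tracing are told about the NMI.
 */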
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

        __nmi_enter();
        lockdep_hardirqs_off(CALLER_ADDR0);
        lockdep_hardirq_enter();
        rcu_nmi_enter();

        trace_hardirqs_off_finish();
        ftrace_nmi_enter();
}

void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
        bool restore = regs->lockdep_hardirqs;

        ftrace_nmi_exit();
        if (restore) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        }

        rcu_nmi_exit();
        lockdep_hardirq_exit();
        if (restore)
                lockdep_hardirqs_on(CALLER_ADDR0);
        __nmi_exit();
}
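
/*
 * With pseudo-NMIs enabled, an interrupt taken while the interrupted
 * context had interrupts masked can only be a pseudo-NMI, so it takes
 * the NMI entry/exit path; otherwise the regular kernel-mode path is
 * used.
 */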
static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                arm64_enter_nmi(regs);
        else
                enter_from_kernel_mode(regs);
}

static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                arm64_exit_nmi(regs);
        else
                exit_to_kernel_mode(regs);
}

static void __sched arm64_preempt_schedule_irq(void)
{
        lockdep_assert_irqs_disabled();

        /*
         * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
         * priority masking is used the GIC irqchip driver will clear DAIF.IF
         * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
         * DAIF we must have handled an NMI, so skip preemption.
         */
        if (system_uses_irq_prio_masking() && read_sysreg(daif))
                return;

        /*
         * Preempting a task from an IRQ means we leave copies of PSTATE
         * on the stack. cpufeature's enable calls may modify PSTATE, but
         * resuming one of these preempted tasks would undo those changes.
         *
         * Only allow a task to be preempted once cpufeatures have been
         * enabled.
         */
        if (system_capabilities_finalized())
                preempt_schedule_irq();
}
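
/*
 * Run the root IRQ/FIQ handler on the IRQ stack if we were interrupted
 * on the task stack; otherwise call it directly on the current stack.
 */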
static void do_interrupt_handler(struct pt_regs *regs,
                                 void (*handler)(struct pt_regs *))
{
        if (on_thread_stack())
                call_on_irq_stack(regs, handler);
        else
                handler(regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);
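
/*
 * Workaround for Cortex-A76 erratum 1463225: when single-stepping an SVC,
 * deliberately take a dummy single-step exception so that interrupts are
 * re-enabled on the syscall path before MDSCR_EL1 is restored and the
 * syscall is handled.
 */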
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
        u32 reg, val;

        if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
                return;

        if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
                return;

        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
        reg = read_sysreg(mdscr_el1);
        val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
        write_sysreg(val, mdscr_el1);
        asm volatile("msr daifclr, #8");
        isb();

        /* We will have taken a single-step exception by this point */

        write_sysreg(reg, mdscr_el1);
        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
                return false;

        /*
         * We've taken a dummy step exception from the kernel to ensure
         * that interrupts are re-enabled on the syscall path. Return back
         * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
         * masked so that we can safely restore the mdscr and get on with
         * handling the syscall.
         */
        regs->pstate |= PSR_D_BIT;
        return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
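
/*
 * EL1 synchronous exception helpers: each performs kernel-mode entry
 * accounting, inherits the interrupted context's DAIF flags, calls the
 * relevant fault handler, then masks DAIF again before the kernel-mode
 * exit accounting.
 */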
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_mem_abort(far, esr, regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_sp_pc_abort(far, esr, regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_undefinstr(regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        bad_mode(regs, 0, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}
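
/*
 * Debug exceptions from EL1 are accounted like NMIs: only the lockdep,
 * RCU and tracing state is updated, and the prior hardirq state is saved
 * in pt_regs for the matching exit.
 */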
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

        lockdep_hardirqs_off(CALLER_ADDR0);
        rcu_nmi_enter();

        trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
        bool restore = regs->lockdep_hardirqs;

        if (restore) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        }

        rcu_nmi_exit();
        if (restore)
                lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        arm64_enter_el1_dbg(regs);
        if (!cortex_a76_erratum_1463225_debug_handler(regs))
                do_debug_exception(far, esr, regs);
        arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_ptrauth_fault(regs, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}
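
/*
 * Top-level dispatcher for synchronous exceptions taken from EL1, keyed
 * on the exception class (EC) field of ESR_EL1.
 */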
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_DABT_CUR:
        case ESR_ELx_EC_IABT_CUR:
                el1_abort(regs, esr);
                break;
        /*
         * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
         * recursive exception when trying to push the initial pt_regs.
         */
        case ESR_ELx_EC_PC_ALIGN:
                el1_pc(regs, esr);
                break;
        case ESR_ELx_EC_SYS64:
        case ESR_ELx_EC_UNKNOWN:
                el1_undef(regs);
                break;
        case ESR_ELx_EC_BREAKPT_CUR:
        case ESR_ELx_EC_SOFTSTP_CUR:
        case ESR_ELx_EC_WATCHPT_CUR:
        case ESR_ELx_EC_BRK64:
                el1_dbg(regs, esr);
                break;
        case ESR_ELx_EC_FPAC:
                el1_fpac(regs, esr);
                break;
        default:
                el1_inv(regs, esr);
        }
}
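
/*
 * Common EL1 IRQ/FIQ handling: unmask debug and SError while keeping
 * IRQ/FIQ masked, dispatch to the registered root handler, and preempt
 * the interrupted task when preemption is enabled and its preempt count
 * is zero.
 */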
static void noinstr el1_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
{
        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

        enter_el1_irq_or_nmi(regs);
        do_interrupt_handler(regs, handler);

        /*
         * Note: thread_info::preempt_count includes both thread_info::count
         * and thread_info::need_resched, and is not equivalent to
         * preempt_count().
         */
        if (IS_ENABLED(CONFIG_PREEMPTION) &&
            READ_ONCE(current_thread_info()->preempt_count) == 0)
                arm64_preempt_schedule_irq();

        exit_el1_irq_or_nmi(regs);
}

asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1_fiq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1_error_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        local_daif_restore(DAIF_ERRCTX);
        arm64_enter_nmi(regs);
        do_serror(regs, esr);
        arm64_exit_nmi(regs);
}
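
/*
 * Entry/exit accounting for exceptions taken from EL0: inform lockdep,
 * context tracking and tracing on entry, and check the MTE tag fault
 * status register on the way back to userspace.
 */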
asmlinkage void noinstr enter_from_user_mode(void)
{
        lockdep_hardirqs_off(CALLER_ADDR0);
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
        trace_hardirqs_off_finish();
}

asmlinkage void noinstr exit_to_user_mode(void)
{
        mte_check_tfsr_exit();

        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        user_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);
}
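
/*
 * EL0 synchronous exception helpers: each performs the user-mode entry
 * accounting and restores DAIF_PROCCTX around the relevant fault handler;
 * the exit accounting happens on the return-to-userspace path.
 */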
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        /*
         * We've taken an instruction abort from userspace and not yet
         * re-enabled IRQs. If the address is a kernel address, apply
         * BP hardening prior to enabling IRQs and pre-emption.
         */
        if (!is_ttbr0_addr(far))
                arm64_apply_bp_hardening();

        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_acc(esr, regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_sve_acc(esr, regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_exc(esr, regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_sysinstr(esr, regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        if (!is_ttbr0_addr(instruction_pointer(regs)))
                arm64_apply_bp_hardening();

        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(far, esr, regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(regs->sp, esr, regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_undefinstr(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_bti(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        bad_el0_sync(regs, 0, esr);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
        /* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
        unsigned long far = read_sysreg(far_el1);

        enter_from_user_mode();
        do_debug_exception(far, esr, regs);
        local_daif_restore(DAIF_PROCCTX);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
        enter_from_user_mode();
        cortex_a76_erratum_1463225_svc_handler();
        do_el0_svc(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_ptrauth_fault(regs, esr);
}
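
/*
 * Top-level dispatcher for synchronous exceptions taken from 64-bit EL0,
 * keyed on the exception class (EC) field of ESR_EL1.
 */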
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_SVC64:
                el0_svc(regs);
                break;
        case ESR_ELx_EC_DABT_LOW:
                el0_da(regs, esr);
                break;
        case ESR_ELx_EC_IABT_LOW:
                el0_ia(regs, esr);
                break;
        case ESR_ELx_EC_FP_ASIMD:
                el0_fpsimd_acc(regs, esr);
                break;
        case ESR_ELx_EC_SVE:
                el0_sve_acc(regs, esr);
                break;
        case ESR_ELx_EC_FP_EXC64:
                el0_fpsimd_exc(regs, esr);
                break;
        case ESR_ELx_EC_SYS64:
        case ESR_ELx_EC_WFx:
                el0_sys(regs, esr);
                break;
        case ESR_ELx_EC_SP_ALIGN:
                el0_sp(regs, esr);
                break;
        case ESR_ELx_EC_PC_ALIGN:
                el0_pc(regs, esr);
                break;
        case ESR_ELx_EC_UNKNOWN:
                el0_undef(regs);
                break;
        case ESR_ELx_EC_BTI:
                el0_bti(regs);
                break;
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_WATCHPT_LOW:
        case ESR_ELx_EC_BRK64:
                el0_dbg(regs, esr);
                break;
        case ESR_ELx_EC_FPAC:
                el0_fpac(regs, esr);
                break;
        default:
                el0_inv(regs, esr);
        }
}
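
/*
 * Common EL0 IRQ/FIQ handling. A PC with bit 55 set is a TTBR1 (kernel)
 * address, so apply branch predictor hardening before dispatching to the
 * root handler, as the other EL0 paths do for kernel addresses.
 */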
static void noinstr el0_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
{
        enter_from_user_mode();

        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

        if (regs->pc & BIT(55))
                arm64_apply_bp_hardening();

        do_interrupt_handler(regs, handler);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
        el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs)
{
        __el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
        el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0_fiq_handler(struct pt_regs *regs)
{
        __el0_fiq_handler_common(regs);
}

static void __el0_error_handler_common(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        enter_from_user_mode();
        local_daif_restore(DAIF_ERRCTX);
        arm64_enter_nmi(regs);
        do_serror(regs, esr);
        arm64_exit_nmi(regs);
        local_daif_restore(DAIF_PROCCTX);
}

asmlinkage void noinstr el0_error_handler(struct pt_regs *regs)
{
        __el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
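/*
 * AArch32 (compat) EL0 exceptions: syscalls, CP15 accesses and 32-bit
 * debug exceptions get dedicated helpers; the remaining exception
 * classes reuse the 64-bit EL0 helpers above.
 */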
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_cp15instr(esr, regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
        enter_from_user_mode();
        cortex_a76_erratum_1463225_svc_handler();
        do_el0_svc_compat(regs);
}

asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_SVC32:
                el0_svc_compat(regs);
                break;
        case ESR_ELx_EC_DABT_LOW:
                el0_da(regs, esr);
                break;
        case ESR_ELx_EC_IABT_LOW:
                el0_ia(regs, esr);
                break;
        case ESR_ELx_EC_FP_ASIMD:
                el0_fpsimd_acc(regs, esr);
                break;
        case ESR_ELx_EC_FP_EXC32:
                el0_fpsimd_exc(regs, esr);
                break;
        case ESR_ELx_EC_PC_ALIGN:
                el0_pc(regs, esr);
                break;
        case ESR_ELx_EC_UNKNOWN:
        case ESR_ELx_EC_CP14_MR:
        case ESR_ELx_EC_CP14_LS:
        case ESR_ELx_EC_CP14_64:
                el0_undef(regs);
                break;
        case ESR_ELx_EC_CP15_32:
        case ESR_ELx_EC_CP15_64:
                el0_cp15(regs, esr);
                break;
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_WATCHPT_LOW:
        case ESR_ELx_EC_BKPT32:
                el0_dbg(regs, esr);
                break;
        default:
                el0_inv(regs, esr);
        }
}

asmlinkage void noinstr el0_irq_compat_handler(struct pt_regs *regs)
{
        __el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0_fiq_compat_handler(struct pt_regs *regs)
{
        __el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs)
{
        __el0_error_handler_common(regs);
}
#endif /* CONFIG_COMPAT */