1 // SPDX-License-Identifier: GPL-2.0
3 * Exception handling code
5 * Copyright (C) 2019 ARM Ltd.
8 #include <linux/context_tracking.h>
9 #include <linux/ptrace.h>
10 #include <linux/thread_info.h>
12 #include <asm/cpufeature.h>
13 #include <asm/daifflags.h>
15 #include <asm/exception.h>
16 #include <asm/kprobes.h>
18 #include <asm/sysreg.h>
21 * This is intended to match the logic in irqentry_enter(), handling the kernel
22 * mode transitions only.
/*
 * Entry bookkeeping for a synchronous/asynchronous exception taken from
 * kernel mode.  Tells lockdep that hardirqs are now masked, and gives RCU
 * a chance to note the interruption (rcu_irq_enter_check_tick()) before
 * any instrumentable code runs.
 *
 * NOTE(review): the call order below (lockdep_hardirqs_off() before the
 * RCU/tracing calls) is deliberate — tracing must only run once the
 * lockdep irq state is consistent.  Do not reorder.
 */
24 static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
26 regs->exit_rcu = false;
/*
 * Idle task with !TINY_RCU: RCU may not be watching here, so take the
 * slower path and flag that exit_to_kernel_mode() must undo it.
 * (Some lines of this branch are not visible in this view.)
 */
28 if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
29 lockdep_hardirqs_off(CALLER_ADDR0);
31 trace_hardirqs_off_finish();
/* Remembered so exit_to_kernel_mode() knows to tear RCU back down. */
33 regs->exit_rcu = true;
/* Common (RCU already watching) path. */
37 lockdep_hardirqs_off(CALLER_ADDR0);
38 rcu_irq_enter_check_tick();
39 trace_hardirqs_off_finish();
43 * This is intended to match the logic in irqentry_exit(), handling the kernel
44 * mode transitions only, and with preemption handled elsewhere.
/*
 * Exit bookkeeping mirroring enter_from_kernel_mode(): if the interrupted
 * kernel context had interrupts enabled (per the saved pt_regs), announce
 * to tracing and lockdep that hardirqs are about to be re-enabled.
 * The "prepare" calls must precede lockdep_hardirqs_on() — keep the order.
 */
46 static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
/* We must still be running with IRQs masked at this point. */
48 lockdep_assert_irqs_disabled();
50 if (interrupts_enabled(regs)) {
52 trace_hardirqs_on_prepare();
53 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
55 lockdep_hardirqs_on(CALLER_ADDR0);
/*
 * Entry bookkeeping for an NMI-like exception (e.g. a pseudo-NMI taken
 * while interrupts were masked).  Saves lockdep's current hardirq state
 * in pt_regs so arm64_exit_nmi() can restore it on the way out, then
 * marks hardirqs off and enters lockdep's hardirq context.
 */
66 void noinstr arm64_enter_nmi(struct pt_regs *regs)
/* Stashed for the matching arm64_exit_nmi(). */
68 regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
71 lockdep_hardirqs_off(CALLER_ADDR0);
72 lockdep_hardirq_enter();
75 trace_hardirqs_off_finish();
/*
 * Exit bookkeeping paired with arm64_enter_nmi().  'restore' is the
 * hardirq-enabled state the entry path stashed in pt_regs; the on/off
 * announcements below presumably run only when it is set (the guarding
 * conditionals are not visible in this view — confirm against the full
 * file).
 */
79 void noinstr arm64_exit_nmi(struct pt_regs *regs)
81 bool restore = regs->lockdep_hardirqs;
85 trace_hardirqs_on_prepare();
86 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
/* Leave lockdep's hardirq context before re-announcing hardirqs on. */
90 lockdep_hardirq_exit();
92 lockdep_hardirqs_on(CALLER_ADDR0);
/*
 * EL1 IRQ entry dispatch: with pseudo-NMI support, an IRQ taken while the
 * interrupted context had interrupts masked can only be an NMI, so it gets
 * the NMI bookkeeping; otherwise the normal kernel-mode entry path runs.
 * (The 'else' joining the two calls is not visible in this view.)
 */
96 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
98 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
99 arm64_enter_nmi(regs);
101 enter_from_kernel_mode(regs);
/*
 * EL1 IRQ exit dispatch, mirroring enter_el1_irq_or_nmi(): undo NMI
 * bookkeeping for a pseudo-NMI, otherwise the normal kernel-mode exit.
 * (The 'else' joining the two calls is not visible in this view.)
 */
104 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
106 if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
107 arm64_exit_nmi(regs);
109 exit_to_kernel_mode(regs);
/*
 * EL1 data/instruction abort: FAR_EL1 is read first, before entry
 * bookkeeping can run anything that might clobber it.  The fault address
 * is untagged before being handed to do_mem_abort().
 */
112 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
114 unsigned long far = read_sysreg(far_el1);
116 enter_from_kernel_mode(regs);
/* Re-enable exceptions to the extent the interrupted context had them. */
117 local_daif_inherit(regs);
118 far = untagged_addr(far);
119 do_mem_abort(far, esr, regs);
121 exit_to_kernel_mode(regs);
/*
 * EL1 PC alignment fault: capture FAR_EL1 early, then run the standard
 * kernel-mode entry/exit bookkeeping around do_sp_pc_abort().
 */
124 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
126 unsigned long far = read_sysreg(far_el1);
128 enter_from_kernel_mode(regs);
129 local_daif_inherit(regs);
130 do_sp_pc_abort(far, esr, regs);
132 exit_to_kernel_mode(regs);
/*
 * EL1 undefined instruction: entry/exit bookkeeping around the handler
 * (the handler call itself is not visible in this view — presumably
 * do_undefinstr(); confirm against the full file).
 */
135 static void noinstr el1_undef(struct pt_regs *regs)
137 enter_from_kernel_mode(regs);
138 local_daif_inherit(regs);
141 exit_to_kernel_mode(regs);
/*
 * Catch-all for EL1 sync exception classes with no dedicated handler:
 * reports via bad_mode() after the standard entry bookkeeping.
 */
144 static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
146 enter_from_kernel_mode(regs);
147 local_daif_inherit(regs);
148 bad_mode(regs, 0, esr);
150 exit_to_kernel_mode(regs);
/*
 * EL1 debug exception (breakpoint/watchpoint/step/BRK).  Interrupts stay
 * masked for the whole handler; with pseudo-NMI in use, PMR must be
 * updated to match that masked state before instrumentable code runs.
 */
153 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
/* Read early; only meaningful for watchpoints, UNKNOWN otherwise. */
155 unsigned long far = read_sysreg(far_el1);
158 * The CPU masked interrupts, and we are leaving them masked during
159 * do_debug_exception(). Update PMR as if we had called
162 if (system_uses_irq_prio_masking())
163 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
165 do_debug_exception(far, esr, regs);
/*
 * EL1 pointer-authentication failure (FPAC): standard kernel-mode
 * entry/exit bookkeeping around do_ptrauth_fault().
 */
168 static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
170 enter_from_kernel_mode(regs);
171 local_daif_inherit(regs);
172 do_ptrauth_fault(regs, esr);
174 exit_to_kernel_mode(regs);
/*
 * Top-level dispatcher for synchronous exceptions taken from EL1.
 * Reads ESR_EL1 and switches on the exception class to the per-class
 * el1_* handlers above.  (The per-case handler calls and break
 * statements are mostly not visible in this view; a default arm
 * presumably falls back to el1_inv() — confirm against the full file.)
 */
177 asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
179 unsigned long esr = read_sysreg(esr_el1);
181 switch (ESR_ELx_EC(esr)) {
/* Data/instruction aborts at current EL share the abort path. */
182 case ESR_ELx_EC_DABT_CUR:
183 case ESR_ELx_EC_IABT_CUR:
184 el1_abort(regs, esr);
187 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
188 * recursive exception when trying to push the initial pt_regs.
190 case ESR_ELx_EC_PC_ALIGN:
193 case ESR_ELx_EC_SYS64:
194 case ESR_ELx_EC_UNKNOWN:
/* All debug exception classes funnel into the same debug handler. */
197 case ESR_ELx_EC_BREAKPT_CUR:
198 case ESR_ELx_EC_SOFTSTP_CUR:
199 case ESR_ELx_EC_WATCHPT_CUR:
200 case ESR_ELx_EC_BRK64:
203 case ESR_ELx_EC_FPAC:
/*
 * Entry bookkeeping for exceptions taken from EL0 (user mode): mark
 * hardirqs off for lockdep and sanity-check that context tracking still
 * believes we were in user context.  Keep the call order as-is.
 */
211 asmlinkage void noinstr enter_from_user_mode(void)
213 lockdep_hardirqs_off(CALLER_ADDR0);
214 CT_WARN_ON(ct_state() != CONTEXT_USER);
216 trace_hardirqs_off_finish();
/*
 * Exit bookkeeping on return to EL0: announce to tracing/lockdep that
 * hardirqs are about to be enabled ("prepare" calls first, then on).
 */
219 asmlinkage void noinstr exit_to_user_mode(void)
221 trace_hardirqs_on_prepare();
222 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
224 lockdep_hardirqs_on(CALLER_ADDR0);
/*
 * EL0 data abort: capture FAR_EL1 early, restore the normal process
 * context DAIF state, untag the fault address, and hand off to
 * do_mem_abort().
 */
227 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
229 unsigned long far = read_sysreg(far_el1);
231 enter_from_user_mode();
232 local_daif_restore(DAIF_PROCCTX);
234 do_mem_abort(far, esr, regs);
233 far = untagged_addr(far);
/*
 * EL0 instruction abort.  Branch-predictor hardening must be applied
 * before IRQs/preemption are enabled, hence the early is_ttbr0_addr()
 * check on the faulting address.
 */
237 static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
239 unsigned long far = read_sysreg(far_el1);
242 * We've taken an instruction abort from userspace and not yet
243 * re-enabled IRQs. If the address is a kernel address, apply
244 * BP hardening prior to enabling IRQs and pre-emption.
246 if (!is_ttbr0_addr(far))
247 arm64_apply_bp_hardening();
249 enter_from_user_mode();
250 local_daif_restore(DAIF_PROCCTX);
251 do_mem_abort(far, esr, regs);
/* EL0 FP/ASIMD access trap: standard user-entry path, then handler. */
254 static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
256 enter_from_user_mode();
257 local_daif_restore(DAIF_PROCCTX);
258 do_fpsimd_acc(esr, regs);
/* EL0 SVE access trap: standard user-entry path, then handler. */
261 static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
263 enter_from_user_mode();
264 local_daif_restore(DAIF_PROCCTX);
265 do_sve_acc(esr, regs);
/* EL0 FP/SIMD exception: standard user-entry path, then handler. */
268 static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
270 enter_from_user_mode();
271 local_daif_restore(DAIF_PROCCTX);
272 do_fpsimd_exc(esr, regs);
/* EL0 trapped system instruction: standard user-entry path, then handler. */
275 static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
277 enter_from_user_mode();
278 local_daif_restore(DAIF_PROCCTX);
279 do_sysinstr(esr, regs);
/*
 * EL0 PC alignment fault.  If the faulting PC is a kernel address,
 * apply BP hardening before IRQs are enabled (same rationale as el0_ia).
 */
282 static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
284 unsigned long far = read_sysreg(far_el1);
286 if (!is_ttbr0_addr(instruction_pointer(regs)))
287 arm64_apply_bp_hardening();
289 enter_from_user_mode();
290 local_daif_restore(DAIF_PROCCTX);
291 do_sp_pc_abort(far, esr, regs);
/* EL0 SP alignment fault: the faulting "address" is the saved SP itself. */
294 static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
296 enter_from_user_mode();
297 local_daif_restore(DAIF_PROCCTX);
298 do_sp_pc_abort(regs->sp, esr, regs);
/*
 * EL0 undefined instruction: user-entry bookkeeping (the handler call is
 * not visible in this view — presumably do_undefinstr(); confirm).
 */
301 static void noinstr el0_undef(struct pt_regs *regs)
303 enter_from_user_mode();
304 local_daif_restore(DAIF_PROCCTX);
/*
 * EL0 BTI (branch target) exception: user-entry bookkeeping (the handler
 * call is not visible in this view — presumably do_bti(); confirm).
 */
308 static void noinstr el0_bti(struct pt_regs *regs)
310 enter_from_user_mode();
311 local_daif_restore(DAIF_PROCCTX);
/* Catch-all for unexpected EL0 sync exception classes: bad_el0_sync(). */
315 static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
317 enter_from_user_mode();
318 local_daif_restore(DAIF_PROCCTX);
319 bad_el0_sync(regs, 0, esr);
/*
 * EL0 debug exception.  PMR is aligned with the masked-IRQ state before
 * any instrumentable code when pseudo-NMI is in use; DAIF is only
 * restored (IRQs still off) after the debug handler has run.
 */
322 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
324 /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
325 unsigned long far = read_sysreg(far_el1);
327 if (system_uses_irq_prio_masking())
328 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
330 enter_from_user_mode();
331 do_debug_exception(far, esr, regs);
332 local_daif_restore(DAIF_PROCCTX_NOIRQ);
/*
 * EL0 SVC (64-bit syscall) entry: fix up PMR for pseudo-NMI, then
 * user-entry bookkeeping (the do_el0_svc() call is not visible in this
 * view — confirm against the full file).
 */
335 static void noinstr el0_svc(struct pt_regs *regs)
337 if (system_uses_irq_prio_masking())
338 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
340 enter_from_user_mode();
/* EL0 pointer-authentication failure: standard user path, then handler. */
344 static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
346 enter_from_user_mode();
347 local_daif_restore(DAIF_PROCCTX);
348 do_ptrauth_fault(regs, esr);
/*
 * Top-level dispatcher for synchronous exceptions taken from 64-bit EL0.
 * Reads ESR_EL1 and switches on the exception class to the per-class
 * el0_* handlers above.  (Most per-case handler calls, break statements
 * and the default arm are not visible in this view — the default
 * presumably falls back to el0_inv(); confirm against the full file.)
 */
351 asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
353 unsigned long esr = read_sysreg(esr_el1);
355 switch (ESR_ELx_EC(esr)) {
356 case ESR_ELx_EC_SVC64:
359 case ESR_ELx_EC_DABT_LOW:
362 case ESR_ELx_EC_IABT_LOW:
365 case ESR_ELx_EC_FP_ASIMD:
366 el0_fpsimd_acc(regs, esr);
369 el0_sve_acc(regs, esr);
371 case ESR_ELx_EC_FP_EXC64:
372 el0_fpsimd_exc(regs, esr);
374 case ESR_ELx_EC_SYS64:
378 case ESR_ELx_EC_SP_ALIGN:
381 case ESR_ELx_EC_PC_ALIGN:
384 case ESR_ELx_EC_UNKNOWN:
/* All debug exception classes funnel into the same debug handler. */
390 case ESR_ELx_EC_BREAKPT_LOW:
391 case ESR_ELx_EC_SOFTSTP_LOW:
392 case ESR_ELx_EC_WATCHPT_LOW:
393 case ESR_ELx_EC_BRK64:
396 case ESR_ELx_EC_FPAC:
/* Compat EL0 trapped CP15 access: standard user path, then handler. */
405 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
407 enter_from_user_mode();
408 local_daif_restore(DAIF_PROCCTX);
409 do_cp15instr(esr, regs);
/*
 * Compat (32-bit) EL0 SVC entry: fix up PMR for pseudo-NMI, then the
 * user-entry bookkeeping and the compat syscall handler.
 */
412 static void noinstr el0_svc_compat(struct pt_regs *regs)
414 if (system_uses_irq_prio_masking())
415 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
417 enter_from_user_mode();
418 do_el0_svc_compat(regs);
/*
 * Top-level dispatcher for synchronous exceptions taken from 32-bit
 * (compat) EL0.  Mirrors el0_sync_handler() but with AArch32 exception
 * classes (CP14/CP15 accesses, BKPT32, FP_EXC32).  (Most per-case
 * handler calls, break statements and the default arm are not visible
 * in this view — confirm against the full file.)
 */
421 asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
423 unsigned long esr = read_sysreg(esr_el1);
425 switch (ESR_ELx_EC(esr)) {
426 case ESR_ELx_EC_SVC32:
427 el0_svc_compat(regs);
429 case ESR_ELx_EC_DABT_LOW:
432 case ESR_ELx_EC_IABT_LOW:
435 case ESR_ELx_EC_FP_ASIMD:
436 el0_fpsimd_acc(regs, esr);
438 case ESR_ELx_EC_FP_EXC32:
439 el0_fpsimd_exc(regs, esr);
441 case ESR_ELx_EC_PC_ALIGN:
/* CP14 accesses grouped with UNKNOWN; CP15 handled separately below. */
444 case ESR_ELx_EC_UNKNOWN:
445 case ESR_ELx_EC_CP14_MR:
446 case ESR_ELx_EC_CP14_LS:
447 case ESR_ELx_EC_CP14_64:
450 case ESR_ELx_EC_CP15_32:
451 case ESR_ELx_EC_CP15_64:
/* All debug exception classes funnel into the same debug handler. */
454 case ESR_ELx_EC_BREAKPT_LOW:
455 case ESR_ELx_EC_SOFTSTP_LOW:
456 case ESR_ELx_EC_WATCHPT_LOW:
457 case ESR_ELx_EC_BKPT32:
464 #endif /* CONFIG_COMPAT */