// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/sched/debug.h> /* for show_regs */

#include <asm/asm-prototypes.h>
#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>

#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif

typedef long (*syscall_fn)(long, long, long, long, long, long);
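
/*
 * With interrupt_exit_not_reentrant enabled, exit_must_hard_disable()
 * forces the exit code to hard-disable MSR[EE] and MSR[RI] before the
 * final return sequence, because that sequence can not be safely
 * restarted. Otherwise the Book3S-64 exit path is restartable and may
 * leave EE/RI set (see prep_irq_for_enabled_exit()).
 */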

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
{
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
	return true;
}
#endif

/*
 * local irqs must be disabled. Returns false if the caller must re-enable
 * them, check for new work, and try again.
 *
 * This should be called with local irqs disabled, but if they were previously
 * enabled when the interrupt handler returns (indicating a process-context /
 * synchronous interrupt) then irqs_enabled should be true.
 *
 * If restartable is true then EE/RI can be left on because interrupts are
 * handled with a restart sequence.
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	if (exit_must_hard_disable() || !restartable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* This pattern matches prep_irq_for_idle */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (exit_must_hard_disable() || !restartable) {
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			__hard_RI_enable();
		}
		trace_hardirqs_off();

		return false;
	}
#endif
	return true;
}

/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
				   long r6, long r7, long r8,
				   unsigned long r0, struct pt_regs *regs)
{
	syscall_fn f;

	kuep_lock();

	regs->orig_gpr3 = r3;

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

	trace_hardirqs_off(); /* finish reconciling */

	CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
	user_exit_irqoff();
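
	/*
	 * Syscalls are always entered from user mode with (soft) interrupts
	 * enabled. BookE and 40x have no MSR[RI] bit, so the RI sanity
	 * check is skipped on those platforms.
	 */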
	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
		BUG_ON(!(regs->msr & MSR_RI));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(arch_irq_disabled_regs(regs));

#ifdef CONFIG_PPC_PKEY
	if (mmu_has_feature(MMU_FTR_PKEY)) {
		unsigned long amr, iamr;
		bool flush_needed = false;
		/*
		 * When entering from userspace we mostly have the AMR/IAMR
		 * different from kernel default values. Hence don't compare.
		 */
		amr = mfspr(SPRN_AMR);
		iamr = mfspr(SPRN_IAMR);
		regs->amr  = amr;
		regs->iamr = iamr;
		if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
			mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
			flush_needed = true;
		}
		if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
			mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
			flush_needed = true;
		}
		if (flush_needed)
			isync();
	} else
#endif
		kuap_assert_locked();
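
	/*
	 * From this point the kernel runs with user access blocked by KUAP
	 * (AMR = AMR_KUAP_BLOCKED) and user execution blocked by KUEP where
	 * those features are present; the user AMR/IAMR values saved in regs
	 * above are restored on exit by kuap_user_restore().
	 */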

	booke_restore_dbcr0();

	account_cpu_user_entry();

	account_stolen_time();

	/*
	 * This is not required for the syscall exit path, but makes the
	 * stack frame look nicer. If this was initialised in the first stack
	 * frame, or if the unwinder was taught the first stack frame always
	 * returns to user with IRQS_ENABLED, this store could be avoided!
	 */
	irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);

	local_irq_enable();

	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
		if (unlikely(trap_is_unsupported_scv(regs))) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		/*
		 * We use the return value of do_syscall_trace_enter() as the
		 * syscall number. If the syscall was rejected for any reason,
		 * do_syscall_trace_enter() returns an invalid syscall number
		 * and the test against NR_syscalls will fail, so the return
		 * value to be used is in regs->gpr[3].
		 */
		r0 = do_syscall_trace_enter(regs);
		if (unlikely(r0 >= NR_syscalls))
			return regs->gpr[3];
		r3 = regs->gpr[3];
		r4 = regs->gpr[4];
		r5 = regs->gpr[5];
		r6 = regs->gpr[6];
		r7 = regs->gpr[7];
		r8 = regs->gpr[8];

	} else if (unlikely(r0 >= NR_syscalls)) {
		if (unlikely(trap_is_unsupported_scv(regs))) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		return -ENOSYS;
	}

	/* May be faster to do array_index_nospec? */
	barrier_nospec();

	if (unlikely(is_compat_task())) {
		f = (void *)compat_sys_call_table[r0];
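
		/*
		 * Compat (32-bit) userspace supplies 32-bit arguments, and
		 * the top halves of the GPRs may contain stale data; mask
		 * them off so handlers see zero-extended values.
		 */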
		r3 &= 0x00000000ffffffffULL;
		r4 &= 0x00000000ffffffffULL;
		r5 &= 0x00000000ffffffffULL;
		r6 &= 0x00000000ffffffffULL;
		r7 &= 0x00000000ffffffffULL;
		r8 &= 0x00000000ffffffffULL;

	} else {
		f = (void *)sys_call_table[r0];
	}

	return f(r3, r4, r5, r6, r7, r8);
}
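
/*
 * Load the thread's debug control state (DBCR0) before returning to user
 * mode. On 32-bit the current hardware value is stashed in global_dbcr0
 * for this CPU first; DBSR is then cleared so stale debug events are not
 * reported to the new context.
 */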
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBSR, -1);
#endif
}
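
/*
 * The exit code can skip reloading SRR0/SRR1 (or HSRR0/HSRR1) when the
 * paca srr_valid/hsrr_valid flags say the registers already hold the
 * right values. Verify that assumption here: if the live (H)SRRs do not
 * match regs->nip/regs->msr, warn once and clear the valid flag so the
 * exit path reloads them.
 */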
static void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long trap, srr0, srr1;
	static bool warned;
	u8 *validp;
	char *h;

	if (trap_is_scv(regs))
		return;

	trap = TRAP(regs);
	// EE in HV mode sets HSRRs like 0xea0
	if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
		trap = 0xea0;

	switch (trap) {
	case 0x980:
	case INTERRUPT_H_DATA_STORAGE:
	case 0xe20:
	case 0xe40:
	case INTERRUPT_HMI:
	case 0xe80:
	case 0xea0:
	case INTERRUPT_H_FAC_UNAVAIL:
	case 0x1200:
	case 0x1500:
	case 0x1600:
	case 0x1800:
		validp = &local_paca->hsrr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_HSRR0);
		srr1 = mfspr(SPRN_HSRR1);
		h = "H";

		break;
	default:
		validp = &local_paca->srr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_SRR0);
		srr1 = mfspr(SPRN_SRR1);
		h = "";
		break;
	}

	if (srr0 == regs->nip && srr1 == regs->msr)
		return;

	/*
	 * An NMI / soft-NMI interrupt may have come in after we found
	 * srr_valid and before the SRRs are loaded. The interrupt then
	 * comes in and clobbers SRRs and clears srr_valid. Then we load
	 * the SRRs here and test them above and find they don't match.
	 *
	 * Test validity again after that, to catch such false positives.
	 *
	 * This test in general will have some window for false negatives
	 * and may not catch and fix all such cases if an NMI comes in
	 * later and clobbers SRRs without clearing srr_valid, but hopefully
	 * such things will get caught most of the time, statistically
	 * enough to be able to get a warning out.
	 */
	barrier();

	if (!*validp)
		return;

	if (!warned) {
		warned = true;
		printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
		printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
		show_regs(regs);
	}

	*validp = 0; /* fixup */
#endif
}
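
/*
 * Common exit-to-user work, shared by the syscall and interrupt exit
 * paths: run TIF work (reschedule, signals) with interrupts enabled,
 * restore FP/VEC/TM register state, then attempt the final irq-disabled
 * exit sequence, retrying from the top if new work or a pending
 * soft-masked interrupt is found. The returned flags tell the exit
 * assembly whether it must restore all GPRs (_TIF_RESTOREALL).
 */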
static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
	unsigned long ti_flags;

again:
	ti_flags = READ_ONCE(current_thread_info()->flags);
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = READ_ONCE(current_thread_info()->flags);
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
				unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If userspace MSR has all available FP bits set,
			 * then they are live and there is no need to restore.
			 * If not, it means the regs were given up and
			 * restore_math may decide to restore them (to avoid
			 * taking an FP fault).
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	check_return_regs_valid(regs);

	user_enter_irqoff();
	if (!prep_irq_for_enabled_exit(true)) {
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access locks last */
	kuap_user_restore(regs);
	kuep_unlock();

	return ret;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPR registers and CTR and XER
 * from the interrupt frame.
 *
 * The function graph tracer cannot trace the return side of this function,
 * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs,
					   long scv)
{
	unsigned long ti_flags;
	unsigned long ret = 0;
	bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;

	CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap_assert_locked();

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = current_thread_info()->flags;

	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}
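
	/*
	 * The sc ABI reports failure via CR0[SO] with a positive errno in
	 * r3 (e.g., a failed open() returns r3 = ENOENT with SO set),
	 * hence the negation above. scv returns Linux-style negative errno
	 * values directly, so is_not_scv excludes it from this conversion.
	 */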

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

	local_irq_disable();
	ret = interrupt_exit_user_prepare_main(ret, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
	/*
	 * This is called when detecting a soft-pending interrupt as well as
	 * an alternate-return interrupt. So we can't just have the alternate
	 * return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
	 * the soft-pending case were to fix things up as well). RI might be
	 * disabled, in which case it gets re-enabled by __hard_irq_disable().
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

	return regs->exit_result;
}
#endif
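
/*
 * Exit preparation for non-syscall interrupts returning to user mode.
 * This performs the same exit-to-user work as the syscall path, via
 * interrupt_exit_user_prepare_main(), starting with ret = 0 since there
 * are no per-syscall flags to handle.
 */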
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
	unsigned long ret;

	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
		BUG_ON(!(regs->msr & MSR_RI));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * AMR can only have been unlocked if we interrupted the kernel.
	 */
	kuap_assert_locked();

	local_irq_disable();

	ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

void preempt_schedule_irq(void);
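
/*
 * Exit preparation for interrupts returning to a kernel context. Handles
 * preemption when returning to a preemptible context with irqs on, and
 * replays pending soft-masked interrupts when the exit sequence cannot
 * complete atomically. The return value is non-zero when an emulated
 * stack store (_TIF_EMULATE_STACK_STORE) must be completed by the
 * low-level exit code.
 */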
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
	unsigned long flags;
	unsigned long ret = 0;
	unsigned long kuap;
	bool stack_store = current_thread_info()->flags &
						_TIF_EMULATE_STACK_STORE;

	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
	    unlikely(!(regs->msr & MSR_RI)))
		unrecoverable_exception(regs);
	BUG_ON(regs->msr & MSR_PR);
	/*
	 * CT_WARN_ON comes here via program_check_exception,
	 * so avoid recursion.
	 */
	if (TRAP(regs) != INTERRUPT_PROGRAM)
		CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap = kuap_get_and_assert_locked();

	local_irq_save(flags);

	if (!arch_irq_disabled_regs(regs)) {
		/* Returning to a kernel context with local irqs enabled. */
		WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
		if (IS_ENABLED(CONFIG_PREEMPT)) {
			/* Return to preemptible kernel context */
			if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
				if (preempt_count() == 0)
					preempt_schedule_irq();
			}
		}

		check_return_regs_valid(regs);

		/*
		 * Stack store exit can't be restarted because the interrupt
		 * stack frame might have been clobbered.
		 */
		if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
			/*
			 * Replay pending soft-masked interrupts now. Don't
			 * just local_irq_enable(); local_irq_disable(); because
			 * if we are returning from an asynchronous interrupt
			 * here, another one might hit after irqs are enabled,
			 * and it would exit via this same path allowing
			 * another to fire, and so on unbounded.
			 */
			hard_irq_disable();
			replay_soft_interrupts();
			/* Took an interrupt, may have more exit work to do. */
			goto again;
		}
#ifdef CONFIG_PPC64
		/*
		 * An interrupt may clear MSR[EE] and set this concurrently,
		 * but it will be marked pending and the exit will be retried.
		 * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
		 * clear, until interrupt_exit_kernel_restart() calls
		 * hard_irq_disable(), which will set HARD_DIS again.
		 */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	} else {
		check_return_regs_valid(regs);

		if (unlikely(stack_store))
			__hard_EE_RI_disable();
		/*
		 * Returning to a kernel context with local irqs disabled.
		 * Here, if EE was enabled in the interrupted context, enable
		 * it on return as well. A problem exists here where a soft
		 * masked interrupt may have cleared MSR[EE] and set HARD_DIS
		 * here, and it will still exist on return to the caller. This
		 * will be resolved by the masked interrupt firing again.
		 */
		if (regs->msr & MSR_EE)
			local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
#endif /* CONFIG_PPC64 */
	}

	if (unlikely(stack_store)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
		ret = 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	/*
	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
	 * AMR value from the check above.
	 */
	kuap_kernel_restore(regs, kuap);

	return ret;
}
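
/*
 * The *_restart handlers below are entered from the low-level interrupt
 * return code when the restartable exit sequence could not complete (for
 * example because a soft-masked interrupt became pending): hard-disable
 * interrupts again, re-block user access, and redo the exit preparation.
 */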

#ifdef CONFIG_PPC64
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result |= interrupt_exit_user_prepare(regs);

	return regs->exit_result;
}

/*
 * No real need to return a value here because the stack store case does not
 * get restarted.
 */
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	if (regs->softe == IRQS_ENABLED)
		trace_hardirqs_off();

	BUG_ON(user_mode(regs));

	return interrupt_exit_kernel_prepare(regs);
}
#endif