powerpc/64e/interrupt: Use new interrupt context tracking scheme
arch/powerpc/include/asm/interrupt.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
        if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
                /* Can avoid a test-and-clear because NMIs do not call this */
                clear_thread_local_flags(_TLF_NAPPING);
                regs->nip = (unsigned long)power4_idle_nap_return;
        }
#endif
}

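/*
 * Per-interrupt state threaded through the interrupt_*_enter_prepare /
 * interrupt_*_exit_prepare pairs below. Currently empty.
 */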
struct interrupt_state {
};

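/*
 * If the interrupted context uses internal debug mode (DBCR0_IDM),
 * clear DBSR and restore the saved per-CPU DBCR0 value.
 */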
static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        unsigned long dbcr0 = current->thread.debug.dbcr0;

        if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
                mtspr(SPRN_DBSR, -1);
                mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
        }
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
        if (!arch_irq_disabled_regs(regs))
                trace_hardirqs_off();

        if (user_mode(regs)) {
                kuep_lock();
                account_cpu_user_entry();
        } else {
                kuap_save_and_lock(regs);
        }
#endif

#ifdef CONFIG_PPC64
        if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
                trace_hardirqs_off();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        if (user_mode(regs)) {
                CT_WARN_ON(ct_state() != CONTEXT_USER);
                user_exit_irqoff();

                account_cpu_user_entry();
                account_stolen_time();
        } else {
                /*
                 * CT_WARN_ON comes here via program_check_exception,
                 * so avoid recursion.
                 */
                if (TRAP(regs) != 0x700)
                        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
        }
#endif

        booke_restore_dbcr0();
}

/*
 * Note that interrupt_exit_prepare and interrupt_async_exit_prepare do not
 * necessarily return immediately to the regs context (e.g., if regs is
 * usermode, we don't necessarily return to user mode). Other interrupts
 * might be taken between here and return, a context switch / preemption may
 * occur in the exit path after this, or a signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However, interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
        if (user_mode(regs))
                kuep_unlock();
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (cpu_has_feature(CPU_FTR_CTRL) &&
            !test_thread_local_flags(_TLF_RUNLATCH))
                __ppc64_runlatch_on();
#endif

        interrupt_enter_prepare(regs, state);
        irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
        /*
         * Adjust at exit so the main handler sees the true NIA. This must
         * come before irq_exit() because irq_exit can enable interrupts, and
         * if another interrupt is taken before nap_adjust_return has run
         * here, then that interrupt would return directly to idle nap return.
         */
        nap_adjust_return(regs);

        irq_exit();
        interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
        u8 irq_soft_mask;
        u8 irq_happened;
        u8 ftrace_enabled;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
        /* Allow DEC and PMI to be traced when they are soft-NMI */
        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
                if (TRAP(regs) == 0x900)
                        return false;
                if (TRAP(regs) == 0xf00)
                        return false;
        }
        if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
                if (TRAP(regs) == 0x260)
                        return false;
        }

        return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
        state->irq_soft_mask = local_paca->irq_soft_mask;
        state->irq_happened = local_paca->irq_happened;

        /*
         * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
         * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
         * because that goes through irq tracing which we don't want in NMI.
         */
        local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /* Don't do any per-CPU operations until interrupt state is fixed */

        if (nmi_disables_ftrace(regs)) {
                state->ftrace_enabled = this_cpu_get_ftrace_enabled();
                this_cpu_set_ftrace_enabled(0);
        }
#endif

        /*
         * Do not use nmi_enter() for pseries hash guest taking a real-mode
         * NMI because not everything it touches is within the RMA limit.
         */
        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
                        !firmware_has_feature(FW_FEATURE_LPAR) ||
                        radix_enabled() || (mfmsr() & MSR_DR))
                nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
                        !firmware_has_feature(FW_FEATURE_LPAR) ||
                        radix_enabled() || (mfmsr() & MSR_DR))
                nmi_exit();

        /*
         * nmi does not call nap_adjust_return because nmi should not create
         * new work to do (must use irq_work for that).
         */

#ifdef CONFIG_PPC64
        if (nmi_disables_ftrace(regs))
                this_cpu_set_ftrace_enabled(state->ftrace_enabled);

        /* Check we didn't change the pending interrupt mask. */
        WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
        local_paca->irq_happened = state->irq_happened;
        local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is that the noinstr section is
 * placed after the main text section, i.e., very far away from the interrupt
 * entry asm, which creates problems with fitting linker stubs when building
 * large kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)                             \
        __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) are not
 * advisable either; although they may be possible in a pinch, the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)                              \
static __always_inline long ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler long func(struct pt_regs *regs)                       \
{                                                                       \
        long ret;                                                       \
                                                                        \
        ret = ____##func (regs);                                        \
                                                                        \
        return ret;                                                     \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline long ____##func(struct pt_regs *regs)

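/*
 * Usage sketch (hypothetical handler name, for illustration): the macro
 * acts as the function header, so the handler body follows it directly:
 *
 *      DEFINE_INTERRUPT_HANDLER_RAW(do_example_raw)
 *      {
 *              return 0;       // value handed back to the asm caller
 *      }
 */
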
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:       Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)                                 \
        __visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:       Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)                                  \
static __always_inline void ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler void func(struct pt_regs *regs)                       \
{                                                                       \
        struct interrupt_state state;                                   \
                                                                        \
        interrupt_enter_prepare(regs, &state);                          \
                                                                        \
        ____##func (regs);                                              \
                                                                        \
        interrupt_exit_prepare(regs, &state);                           \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline void ____##func(struct pt_regs *regs)

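/*
 * Usage sketch (hypothetical handler name, for illustration); the body
 * runs between interrupt_enter_prepare() and interrupt_exit_prepare():
 *
 *      DEFINE_INTERRUPT_HANDLER(do_example_exception)
 *      {
 *              // handle the exception described by regs
 *      }
 */
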
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)                             \
        __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)                              \
static __always_inline long ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler long func(struct pt_regs *regs)                       \
{                                                                       \
        struct interrupt_state state;                                   \
        long ret;                                                       \
                                                                        \
        interrupt_enter_prepare(regs, &state);                          \
                                                                        \
        ret = ____##func (regs);                                        \
                                                                        \
        interrupt_exit_prepare(regs, &state);                           \
                                                                        \
        return ret;                                                     \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline long ____##func(struct pt_regs *regs)

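/*
 * Usage sketch (hypothetical handler name, for illustration); as with
 * DEFINE_INTERRUPT_HANDLER, but the body returns a value to the asm caller:
 *
 *      DEFINE_INTERRUPT_HANDLER_RET(do_example_fault)
 *      {
 *              return 0;       // meaning of the value is handler-specific
 *      }
 */
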
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:       Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)                           \
        __visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:       Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)                            \
static __always_inline void ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler void func(struct pt_regs *regs)                       \
{                                                                       \
        struct interrupt_state state;                                   \
                                                                        \
        interrupt_async_enter_prepare(regs, &state);                    \
                                                                        \
        ____##func (regs);                                              \
                                                                        \
        interrupt_async_exit_prepare(regs, &state);                     \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline void ____##func(struct pt_regs *regs)

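/*
 * Usage sketch (hypothetical handler name, for illustration); the body
 * runs inside the irq_enter()/irq_exit() pair set up by
 * interrupt_async_enter_prepare()/interrupt_async_exit_prepare():
 *
 *      DEFINE_INTERRUPT_HANDLER_ASYNC(do_example_irq)
 *      {
 *              // handle the asynchronous interrupt
 *      }
 */
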
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)                             \
        __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)                              \
static __always_inline long ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler long func(struct pt_regs *regs)                       \
{                                                                       \
        struct interrupt_nmi_state state;                               \
        long ret;                                                       \
                                                                        \
        interrupt_nmi_enter_prepare(regs, &state);                      \
                                                                        \
        ret = ____##func (regs);                                        \
                                                                        \
        interrupt_nmi_exit_prepare(regs, &state);                       \
                                                                        \
        return ret;                                                     \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline long ____##func(struct pt_regs *regs)

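/*
 * Usage sketch (hypothetical handler name, for illustration); the body
 * runs with NMI state saved and restored by interrupt_nmi_enter_prepare()/
 * interrupt_nmi_exit_prepare():
 *
 *      DEFINE_INTERRUPT_HANDLER_NMI(do_example_nmi)
 *      {
 *              return 0;       // returned to the asm caller
 *      }
 */
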
/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

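/* Enable interrupts iff they were enabled in the interrupted context. */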
static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();
}

#endif /* _ASM_POWERPC_INTERRUPT_H */