powerpc/32s: Move KUEP locking/unlocking in C
[linux-2.6-microblaze.git] / arch / powerpc / include / asm / interrupt.h
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_POWERPC_INTERRUPT_H
3 #define _ASM_POWERPC_INTERRUPT_H
4
5 #include <linux/context_tracking.h>
6 #include <linux/hardirq.h>
7 #include <asm/cputime.h>
8 #include <asm/ftrace.h>
9 #include <asm/kprobes.h>
10 #include <asm/runlatch.h>
11
/*
 * Bookkeeping carried from interrupt_enter_prepare() to
 * interrupt_exit_prepare().  Only Book3E 64-bit has anything to carry
 * (the context tracking state returned by exception_enter()); on all
 * other platforms the struct is empty.
 */
struct interrupt_state {
#ifdef CONFIG_PPC_BOOK3E_64
	enum ctx_state ctx_state;
#endif
};
17
/*
 * Restore debug register state on interrupt entry for BookE-style debug
 * (CONFIG_PPC_ADV_DEBUG_REGS), 32-bit only: if the current thread has
 * internal debug mode enabled (DBCR0_IDM), clear all debug status bits
 * and reload DBCR0 from this CPU's global_dbcr0 slot.  No-op otherwise.
 */
static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);	/* write-1-to-clear all status bits */
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}
29
/*
 * Common C-level entry work for synchronous interrupt handlers, run
 * before the handler body: reconcile the irq trace/soft-mask state with
 * the interrupted context, do context tracking and time accounting for
 * entries from user mode, and restore debug registers.  Paired with
 * interrupt_exit_prepare().
 */
static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	/* 32-bit reconciles in C: sync irq tracing with the interrupted state */
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuep_lock();	/* engage Kernel Userspace Execution Prevention */
		account_cpu_user_entry();
	}
#endif
	/*
	 * Book3E reconciles irq soft mask in asm
	 */
#ifdef CONFIG_PPC_BOOK3S_64
	/* Soft-disable; note in the trace if irqs were previously enabled */
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != 0x700)	/* 0x700 == program check */
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

#ifdef CONFIG_PPC_BOOK3E_64
	/* Book3E 64-bit uses generic context tracking entry/exit */
	state->ctx_state = exception_enter();
	if (user_mode(regs))
		account_cpu_user_entry();
#endif

	booke_restore_dbcr0();
}
73
/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3E_64
	exception_exit(state->ctx_state);
#endif

	/* Undo the entry-time kuep_lock() before returning to user mode */
	if (user_mode(regs))
		kuep_unlock();
	/*
	 * Book3S exits to user via interrupt_exit_user_prepare(), which does
	 * context tracking, which is a cleaner way to handle PREEMPT=y
	 * and avoid context entry/exit in e.g., preempt_schedule_irq()),
	 * which is likely to be where the core code wants to end up.
	 *
	 * The above comment explains why we can't do the
	 *
	 *     if (user_mode(regs))
	 *         user_exit_irqoff();
	 *
	 * sequence here.
	 */
}
110
/*
 * Entry work for asynchronous (external/timer-style) interrupts: on
 * Book3S 64 make sure the runlatch is on, then do the common synchronous
 * entry work and enter hardirq context.  Paired with
 * interrupt_async_exit_prepare().
 */
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/* Turn the runlatch on if the thread hasn't already done so */
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}
122
/*
 * Exit work for asynchronous interrupts: leave hardirq context, then do
 * the common exit work.  Exact reverse order of
 * interrupt_async_enter_prepare().
 */
static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	irq_exit();
	interrupt_exit_prepare(regs, state);
}
128
/*
 * State saved across an NMI so that interrupt_nmi_exit_prepare() can
 * restore exactly what interrupt_nmi_enter_prepare() overrode.
 */
struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	u8 irq_soft_mask;	/* saved local_paca->irq_soft_mask */
	u8 irq_happened;	/* saved local_paca->irq_happened */
#endif
	u8 ftrace_enabled;	/* saved per-CPU ftrace-enabled flag */
#endif
};
138
/*
 * NMI entry: save and override the paca soft-mask state by hand (no irq
 * tracing in NMI context), suppress ftrace for true NMIs, and enter NMI
 * context when it is safe to do so.  Must be unwound with
 * interrupt_nmi_exit_prepare().
 */
static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Don't do any per-CPU operations until interrupt state is fixed */
#endif
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}
172
/*
 * Mirror of interrupt_nmi_enter_prepare(): leave NMI context (under the
 * same real-mode-safety condition used at entry), re-enable ftrace, and
 * restore the saved paca soft-mask state.  Returns directly to regs
 * context — NMIs do no exit work.
 */
static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

#ifdef CONFIG_PPC64
	/* Same trap filter as entry: DEC/PMI soft-NMIs kept ftrace enabled */
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
#endif
}
192
/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 *
 * These attributes are applied to every entry point emitted by the
 * DEFINE_INTERRUPT_HANDLER_* macros below.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
201
/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * No interrupt_enter_prepare()/interrupt_exit_prepare() is run around
 * the body — the handler gets the raw machine state.
 *
 * raw interrupt handlers must not enable or disable interrupts, or
 * schedule, tracing and instrumentation (ftrace, lockdep, etc) would
 * not be advisable either, although may be possible in a pinch, the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The handler body runs between interrupt_enter_prepare() and
 * interrupt_exit_prepare(), which do the entry/exit reconciliation.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
280
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * Like DEFINE_INTERRUPT_HANDLER, but the body's long return value is
 * passed back to the asm caller.  The handler body runs between
 * interrupt_enter_prepare() and interrupt_exit_prepare().
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
318
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The handler body runs between interrupt_async_enter_prepare() and
 * interrupt_async_exit_prepare(), i.e. inside hardirq context.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
351
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The handler body runs between interrupt_nmi_enter_prepare() and
 * interrupt_nmi_exit_prepare(); its long return value is passed back
 * to the asm caller.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
389
390
/* Interrupt handlers, grouped by the file that defines them */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
/* Book3S 64 handles machine checks as async interrupts, others as NMIs */
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
/* PMI has NMI, async and raw flavours depending on how it is taken */
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* Not an interrupt handler; does not return */
void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);
456
/*
 * Enable local interrupts, but only if the interrupted context had them
 * enabled (as recorded in regs).  Used by handlers that may sleep or
 * take locks, to restore the caller's interrupt state first.
 */
static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (arch_irq_disabled_regs(regs))
		return;

	local_irq_enable();
}
462
463 #endif /* _ASM_POWERPC_INTERRUPT_H */