// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>

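/*
 * CREATE_TRACE_POINTS must be defined in exactly one translation unit
 * before including the trace header, so the tracepoint definitions for
 * the IRQ vectors are emitted here.
 */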
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

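/* Count of erroneous interrupts, reported as "ERR" in /proc/interrupts. */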
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

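/* Accessor for another CPU's per-CPU interrupt statistics. */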
#define irq_stats(x)            (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_puts(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_puts(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_puts(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_puts(p, "  IRQ work interrupts\n");
        seq_printf(p, "%*s: ", prec, "RTR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
        seq_puts(p, "  APIC ICR read retries\n");
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_puts(p, "  Platform interrupts\n");
        }
#endif
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
        seq_printf(p, "%*s: ", prec, "DFR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
        seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_puts(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_puts(p, "  Machine check polls\n");
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
        if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
                seq_printf(p, "%*s: ", prec, "HYP");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_callback_count);
                seq_puts(p, "  Hypervisor callback interrupts\n");
        }
#endif
#if IS_ENABLED(CONFIG_HYPERV)
        if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
                seq_printf(p, "%*s: ", prec, "HRE");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_reenlightenment_count);
                seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
        }
        if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
                seq_printf(p, "%*s: ", prec, "HVS");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->hyperv_stimer0_count);
                seq_puts(p, "  Hyper-V stimer0 interrupts\n");
        }
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
        seq_printf(p, "%*s: ", prec, "PIN");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
        seq_puts(p, "  Posted-interrupt notification event\n");

        seq_printf(p, "%*s: ", prec, "NPI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_nested_ipis);
        seq_puts(p, "  Nested posted-interrupt event\n");

        seq_printf(p, "%*s: ", prec, "PIW");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_wakeup_ipis);
        seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
        sum += irq_stats(cpu)->icr_read_retry_count;
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);
        return sum;
}

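/*
 * Invoke the IRQ handler on the proper stack: on 64-bit the handler runs
 * on the per-CPU hard IRQ stack unless that stack is already in use;
 * 32-bit does its own stack handling inside __handle_irq().
 */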
static __always_inline void handle_irq(struct irq_desc *desc,
                                       struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_X86_64))
                run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
        else
                __handle_irq(desc, regs);
}

/*
 * common_interrupt() handles all normal device IRQs (the special SMP
 * cross-CPU interrupts have their own entry points). The vector number
 * is passed in as the @vector argument by the IDT entry code.
 */
DEFINE_IDTENTRY_IRQ(common_interrupt)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;

        /* Entry code tells RCU that we're not quiescent. Check it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

        desc = __this_cpu_read(vector_irq[vector]);
        if (likely(!IS_ERR_OR_NULL(desc))) {
                handle_irq(desc, regs);
        } else {
                ack_APIC_irq();

                if (desc == VECTOR_UNUSED) {
                        pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
                                             __func__, smp_processor_id(),
                                             vector);
                } else {
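                        /*
                         * The vector was shut down (VECTOR_SHUTDOWN) or
                         * retriggered to a new target CPU (VECTOR_RETRIGGERED)
                         * while this interrupt was in flight; quietly mark
                         * the slot unused.
                         */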
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                }
        }

        set_irq_regs(old_regs);
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        ack_APIC_irq();
        trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
        inc_irq_stat(x86_platform_ipis);
        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();
        trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
        if (handler)
                kvm_posted_intr_wakeup_handler = handler;
        else
                kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
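/*
 * Illustrative (hypothetical) usage: a hypervisor module registers its
 * wakeup routine while posted interrupts are in use and clears it again
 * on teardown, e.g.:
 *
 *        kvm_set_posted_intr_wakeup_handler(my_pi_wakeup_handler);
 *        ...
 *        kvm_set_posted_intr_wakeup_handler(NULL);
 */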

/*
 * Handler for POSTED_INTERRUPT_VECTOR. The notification IPI itself is the
 * event: pending posted interrupts are picked up from the descriptor on
 * the next VM entry, so all this handler does is ack and count.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
        ack_APIC_irq();
        inc_irq_stat(kvm_posted_intr_ipis);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
        ack_APIC_irq();
        inc_irq_stat(kvm_posted_intr_wakeup_ipis);
        kvm_posted_intr_wakeup_handler();
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
        ack_APIC_irq();
        inc_irq_stat(kvm_posted_intr_nested_ipis);
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irr, vector;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        irq_migrate_all_off_this_cpu();

        /*
         * We can remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously by
         * this cpu. While it works, I have seen spurious interrupt messages
         * (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send those
         * interrupts to new targets as this cpu is already offlined...
         */
        mdelay(1);

        /*
         * We can walk the vector array of this cpu without holding
         * vector_lock because the cpu is already marked !online, so
         * nothing else will touch it.
         */
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
                        continue;

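                /*
                 * APIC_IRR is a 256-bit bitmap spread across eight 32-bit
                 * registers spaced 0x10 bytes apart; test this vector's bit
                 * to see whether the interrupt is still pending on this CPU.
                 */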
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1 << (vector % 32))) {
                        desc = __this_cpu_read(vector_irq[vector]);

                        raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
                        }
                        raw_spin_unlock(&desc->lock);
                }
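                /* Not retriggered above: the vector is done here, reclaim it. */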
                if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
        }
}
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
static void smp_thermal_vector(void)
{
        if (x86_thermal_enabled())
                intel_thermal_interrupt();
        else
                pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
                       smp_processor_id());
}

DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
        trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
        inc_irq_stat(irq_thermal_count);
        smp_thermal_vector();
        trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
        ack_APIC_irq();
}
#endif