// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/sched/clock.h>
#include <asm/cpu_entry_area.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
#include <asm/sev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
struct nmi_desc {
	raw_spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},
};
struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
static int ignore_nmis __read_mostly;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
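/*
 * For reference, the reason-port bit layout relied on below comes from
 * <asm/mach_traps.h> (reproduced here for illustration only):
 *
 *	NMI_REASON_PORT		0x61
 *	NMI_REASON_SERR		0x80	PCI SERR# was asserted
 *	NMI_REASON_IOCHK	0x40	IOCHK# was asserted
 *	NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)
 *	NMI_REASON_CLEAR_SERR	0x04	writing 1 clears/disables SERR
 *	NMI_REASON_CLEAR_IOCHK	0x08	writing 1 clears/disables IOCHK
 *	NMI_REASON_CLEAR_MASK	0x0f	low nibble is the control part
 */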
static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);
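/*
 * Illustration only: with debugfs mounted at /sys/kernel/debug, the
 * threshold above should be tunable at run time (arch_debugfs_dir is
 * the "x86" directory there), e.g. to warn only above 5ms:
 *
 *	echo 5000000 > /sys/kernel/debug/x86/nmi_longest_ns
 */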
static void nmi_check_duration(struct nmiaction *action, u64 duration)
{
	int remainder_ns, decimal_msecs;

	if (duration < nmi_longest_ns || duration < action->max_duration)
		return;

	action->max_duration = duration;

	remainder_ns = do_div(duration, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		action->handler, duration, decimal_msecs);
}
static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		nmi_check_duration(a, delta);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);
int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Indicate if there are multiple registrations on the
	 * internal NMI handler call chains (SERR and IO_CHECK).
	 */
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * Some handlers need to be executed first, otherwise a fake
	 * event confuses some handlers (kdump uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);
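/*
 * Illustration only, not part of this file: callers normally use the
 * register_nmi_handler() wrapper from <asm/nmi.h>, which builds a
 * static struct nmiaction and then calls __register_nmi_handler().
 * All my_* names below are hypothetical:
 *
 *	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_raised_nmi())
 *			return NMI_DONE;	// not ours, keep walking the chain
 *		my_device_ack_nmi();
 *		return NMI_HANDLED;		// adds to nmi_handle()'s 'handled'
 *	}
 *
 *	register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
 */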
void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	if (panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);
static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi) {
		nmi_panic(regs, "NMI IOCK error: Not continuing");

		/*
		 * If we end up here, it means we have received an NMI while
		 * processing panic(). Simply return without delaying and
		 * re-enabling NMIs.
		 */
		return;
	}

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);
static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' as back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI).
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
static noinstr void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more (anything over two is dropped due to
	 * NMI being edge-triggered).  If this is the second half of
	 * a back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	instrumentation_begin();

	handled = nmi_handle(NMI_LOCAL, regs);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued for the next NMI.  Because the event is
		 * already handled, the next NMI will result in an unknown
		 * NMI.  Instead let's flag this for a potential NMI to
		 * swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		goto out;
	}
	/*
	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
	 *
	 * Another CPU may be processing panic routines while holding
	 * nmi_reason_lock. Check if the CPU issued the IPI for crash dumping,
	 * and if so, call its callback directly.  If there is no CPU preparing
	 * crash dump, we simply loop here.
	 */
	while (!raw_spin_trylock(&nmi_reason_lock)) {
		run_crash_ipi_callback(regs);
		cpu_relax();
	}

	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		goto out;
	}
	raw_spin_unlock(&nmi_reason_lock);
	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * also.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);

out:
	instrumentation_end();
}
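/*
 * Summary of the unknown-NMI decision just above (illustrative):
 *
 *	b2b	swallow_nmi	result
 *	no	any		unknown_nmi_error()
 *	yes	false		unknown_nmi_error()
 *	yes	true		swallowed, nmi_stats.swallow++
 */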
/*
 * An NMI can page fault or hit a breakpoint, which will cause it to lose
 * its NMI context with the CPU when the breakpoint or page fault does an IRET.
 *
 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
 * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI executes an iret, another NMI can preempt it. We do not
 * want to allow this new NMI to run, but we want to execute it when the
 * first one finishes.  We set the state to "latched", and the exit of
 * the first NMI will perform a dec_return, if the result is zero
 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
 * dec_return would have set the state to NMI_EXECUTING (what we want it
 * to be when we are running). In this case, we simply jump back to
 * rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupt
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and they can not take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
static DEFINE_PER_CPU(unsigned long, nmi_dr7);
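/*
 * Worked example of the dec_return exit protocol described above,
 * using the enum values (NOT_RUNNING=0, EXECUTING=1, LATCHED=2):
 *
 *	no nested NMI:	EXECUTING(1) --dec--> 0 (NOT_RUNNING) -> exit handler
 *	nested NMI:	LATCHED(2)   --dec--> 1 (EXECUTING)   -> goto nmi_restart
 *
 * A single this_cpu_dec_return(nmi_state) thus both drops the state and
 * reports whether a 'latched' NMI needs to be replayed.
 */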
DEFINE_IDTENTRY_RAW(exc_nmi)
{
	irqentry_state_t irq_state;

	/*
	 * Re-enable NMIs right here when running as an SEV-ES guest. This might
	 * cause nested NMIs, but those can be handled safely.
	 */
	sev_es_nmi_complete();

	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
		return;

	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
		return;
	}
	this_cpu_write(nmi_state, NMI_EXECUTING);
	this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

	/*
	 * Needs to happen before DR7 is accessed, because the hypervisor can
	 * intercept DR7 reads/writes, turning those into #VC exceptions.
	 */
	sev_es_ist_enter(regs);

	this_cpu_write(nmi_dr7, local_db_save());

	irq_state = irqentry_nmi_enter(regs);

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(this_cpu_read(nmi_dr7));

	sev_es_ist_exit();

	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
		write_cr2(this_cpu_read(nmi_cr2));
	if (this_cpu_dec_return(nmi_state))
		goto nmi_restart;

	if (user_mode(regs))
		mds_user_clear_cpu_buffers();
}
#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
DEFINE_IDTENTRY_RAW(exc_nmi_noist)
{
	exc_nmi(regs);
}
#endif
#if IS_MODULE(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
#endif
void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);