/*
 *  linux/arch/i386/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>
#include <linux/dmi.h>
#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/kdebug.h>

#include "mach_traps.h"
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers
 *   - different performance counters/event selection registers may be
 *     reserved for different subsystems; this reservation system just
 *     tries to coordinate things a little
 */
static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
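/*
 * Typical use of this reservation scheme (a sketch of the convention used
 * by the watchdog code below and by other perfctr users such as oprofile,
 * not an additional API):
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		goto fail;
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
 *		goto fail_and_release_perfctr;
 *	... program the counter and event select MSRs ...
 *
 * with matching release_evntsel_nmi()/release_perfctr_nmi() calls on the
 * teardown and error paths.
 */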
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

extern void show_registers(struct pt_regs *regs);
extern int unknown_nmi_panic;
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_PERFCTR0);
		case 15:
			return (msr - MSR_P4_BPU_PERFCTR0);
		}
	}
	return 0;
}
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_EVNTSEL0);
		case 15:
			return (msr - MSR_P4_BSU_ESCR0);
		}
	}
	return 0;
}
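/*
 * In other words, each family's counter/event-select MSRs are contiguous,
 * so consecutive MSRs map to consecutive bits in the per-CPU owner masks
 * above; e.g. on a K7, MSR_K7_PERFCTR2 would map to reservation bit 2.
 * (Illustrative reading of the two helpers above, not extra behaviour.)
 */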
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}
void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
		return 1;
	return 0;
}
void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
}
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6));
	case X86_VENDOR_INTEL:
		return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6));
	}
	return 0;
}
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat fewer cycles. */
	while (*endflag == 0)
		barrier();
}
static int __init check_nmi_watchdog(void)
{
	volatile int endflag = 0;
	unsigned int *prev_nmi_count;
	int cpu;
	/* Enable NMI watchdog for newer systems.
	   Actually it should be safe for most systems before 2004 too, except
	   for some IBM systems that corrupt registers when an NMI happens
	   during SMM. Unfortunately we don't have more exact information
	   on these, so we use this coarse check. */
	if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004)
		nmi_watchdog = NMI_LOCAL_APIC;
	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;
	if (!atomic_read(&nmi_active))
		return 0;
	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	printk(KERN_INFO "Testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
	mdelay((10*1000)/nmi_hz); // wait 10 ticks
	for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
		/* Check cpu_callin_map here because that is set
		   after the timer is started. */
		if (!cpu_isset(cpu, cpu_callin_map))
			continue;
#endif
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
				cpu, prev_nmi_count[cpu], nmi_count(cpu));
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	endflag = 1;
	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(prev_nmi_count);
	return 0;
}

/* This needs to happen later in boot so counters are working */
late_initcall(check_nmi_watchdog);
static int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	get_option(&str, &nmi);
	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;
	/*
	 * If any other x86 CPU has a local APIC, then
	 * please test the NMI stuff there and send me the
	 * missing bits. Right now Intel P6/P4 and AMD K7 only.
	 */
	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;	/* no lapic support */
	nmi_watchdog = nmi;
	return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
	BUG_ON(atomic_read(&nmi_active) != 0);
}
static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled? */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware? */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}
void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
	BUG_ON(atomic_read(&nmi_active) != 0);
}
void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	}
}
#ifdef CONFIG_PM

static int nmi_pm_active;	/* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}
static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}
static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.cls	= &nmi_sysclass,
};
static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work. -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;
	if (atomic_read(&nmi_active) < 0)
		return 0;
	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);
#endif	/* CONFIG_PM */
/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */
static void write_watchdog_counter(unsigned int perfctr_msr, const char *descr)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsrl(perfctr_msr, 0 - count);
}
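/*
 * Worked example (illustrative numbers only): on a hypothetical 2 GHz CPU,
 * cpu_khz = 2,000,000, so count = 2,000,000,000 / nmi_hz cycles.  With
 * nmi_hz = 1000 the counter is loaded with -2,000,000 and overflows (raising
 * the NMI) roughly once per millisecond of unhalted cycles; after the boot
 * self-test nmi_hz is reduced, stretching the period accordingly.
 */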
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */
#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
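/*
 * For reference (summarizing the AMD K7 PerfEvtSel bit roles, not new code):
 * INT asks the counter to raise an interrupt on overflow (delivered as an
 * NMI because LVTPC is programmed to APIC_DM_NMI below), OS/USR select
 * counting in kernel and user mode, event 0x76 counts cycles while the
 * processor is not halted, and ENABLE finally starts the counter.
 */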
static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	wd->check_bit = 1ULL << 63;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
#define P6_EVNTSEL0_ENABLE	(1 << 22)
#define P6_EVNTSEL_INT		(1 << 20)
#define P6_EVNTSEL_OS		(1 << 17)
#define P6_EVNTSEL_USR		(1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
static int setup_p6_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_P6_PERFCTR0;
	evntsel_msr = MSR_P6_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = P6_EVNTSEL_INT
		| P6_EVNTSEL_OS
		| P6_EVNTSEL_USR
		| P6_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	write_watchdog_counter(perfctr_msr, "P6_PERFCTR0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= P6_EVNTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	wd->check_bit = 1ULL << 39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
static void stop_p6_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */
#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
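/*
 * Why this behaves like a clock (informal reading of the CCCR bits above):
 * with COMPARE, THRESHOLD(15) and COMPLEMENT set, the threshold comparison
 * is inverted into "event count <= 15", which holds on essentially every
 * cycle, so IQ_COUNTER0 increments once per cycle while the CPU is not
 * halted.  The OVF_PMI bit then turns counter overflow into the periodic
 * NMI.
 */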
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources;
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0");
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL << 39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
void setup_apic_nmi_watchdog (void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;
	if (wd->enabled == 1)
		return;
	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			switch (boot_cpu_data.x86) {
			case 6:
				if (boot_cpu_data.x86_model > 0xd)
					return;
				if (!setup_p6_watchdog())
					return;
				break;
			case 15:
				if (boot_cpu_data.x86_model > 0x4)
					return;
				if (!setup_p4_watchdog())
					return;
				break;
			default:
				return;
			}
			break;
		default:
			return;
		}
	}
	wd->enabled = 1;
	atomic_inc(&nmi_active);
}
void stop_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;
	if (wd->enabled == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			switch (boot_cpu_data.x86) {
			case 6:
				if (boot_cpu_data.x86_model > 0xd)
					break;
				stop_p6_watchdog();
				break;
			case 15:
				if (boot_cpu_data.x86_model > 0x4)
					break;
				stop_p4_watchdog();
				break;
			}
		}
	}
	wd->enabled = 0;
	atomic_dec(&nmi_active);
}
/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * Since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there will be more tty-related locks, break them up
 *  here too!]
 */
static unsigned int
	last_irq_sums [NR_CPUS],
	alert_counter [NR_CPUS];
void touch_nmi_watchdog (void)
{
	int i;
	/*
	 * Just reset the alert counters, (other CPUs might be
	 * spinning on locks we hold):
	 */
	for_each_possible_cpu(i)
		alert_counter[i] = 0;
	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
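/*
 * Callers elsewhere in the kernel use this when they legitimately keep a
 * CPU busy for longer than the watchdog period, e.g. (sketch only, the
 * helper below is hypothetical):
 *
 *	while (!controller_ready(dev)) {
 *		touch_nmi_watchdog();
 *		udelay(100);
 *	}
 *
 * so the stuck-CPU heuristic in nmi_watchdog_tick() below does not fire.
 */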
extern void die_nmi(struct pt_regs *, const char *msg);
int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;

	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel P4 uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflowed perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			} else if (wd->perfctr_msr == MSR_P6_PERFCTR0) {
				/* Only P6-based Pentium M needs to re-unmask
				 * the apic vector, but it doesn't hurt
				 * other P6 variants */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			write_watchdog_counter(wd->perfctr_msr, NULL);
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this;
			 * just assume it was a watchdog timer interrupt.
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}
int do_nmi_callback(struct pt_regs * regs, int cpu)
{
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
	return 0;
}
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(regs, buf);
	return 0;
}
/*
 * proc handler for /proc/sys/kernel/nmi
 */
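/*
 * Runtime usage sketch (assuming the sysctl is registered under the name
 * the comment above suggests; the authoritative name is whatever
 * kernel/sysctl.c wires up to proc_nmi_enabled):
 *
 *	# echo 0 > /proc/sys/kernel/nmi	  disable the NMI watchdog
 *	# echo 1 > /proc/sys/kernel/nmi	  re-enable it (local APIC mode only)
 */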
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	if (nmi_watchdog == NMI_DEFAULT) {
		if (nmi_known_cpu() > 0)
			nmi_watchdog = NMI_LOCAL_APIC;
		else
			nmi_watchdog = NMI_IO_APIC;
	}

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);