static DEFINE_MUTEX(watchdog_mutex);
-#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
-# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
-# define NMI_WATCHDOG_DEFAULT 1
+#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
+# define WATCHDOG_HARDLOCKUP_DEFAULT 1
#else
-# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED)
-# define NMI_WATCHDOG_DEFAULT 0
+# define WATCHDOG_HARDLOCKUP_DEFAULT 0
#endif
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
-int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
-int __read_mostly soft_watchdog_user_enabled = 1;
+static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
+static int __read_mostly watchdog_softlockup_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
-static int __read_mostly nmi_watchdog_available;
+static int __read_mostly watchdog_hardlockup_available;
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
*/
void __init hardlockup_detector_disable(void)
{
- nmi_watchdog_user_enabled = 0;
+ watchdog_hardlockup_user_enabled = 0;
}
static int __init hardlockup_panic_setup(char *str)
else if (!strncmp(str, "nopanic", 7))
hardlockup_panic = 0;
else if (!strncmp(str, "0", 1))
- nmi_watchdog_user_enabled = 0;
+ watchdog_hardlockup_user_enabled = 0;
else if (!strncmp(str, "1", 1))
- nmi_watchdog_user_enabled = 1;
+ watchdog_hardlockup_user_enabled = 1;
return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
-/*
- * These functions can be overridden if an architecture implements its
- * own hardlockup detector.
- *
- * watchdog_nmi_enable/disable can be implemented to start and stop when
- * softlockup watchdog start and stop. The arch must select the
- * SOFTLOCKUP_DETECTOR Kconfig.
- */
-int __weak watchdog_nmi_enable(unsigned int cpu)
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
+
+static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
+static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
+static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
+static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
+static unsigned long hard_lockup_nmi_warn;
+
+notrace void arch_touch_nmi_watchdog(void)
{
- hardlockup_detector_perf_enable();
- return 0;
+ /*
+ * Using __raw here because some code paths have
+ * preemption enabled. If preemption is enabled
+ * then interrupts should be enabled too, in which
+ * case we shouldn't have to worry about the watchdog
+ * going off.
+ */
+ raw_cpu_write(watchdog_hardlockup_touched, true);
}
+EXPORT_SYMBOL(arch_touch_nmi_watchdog);
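/*
 * Illustrative sketch (not from this patch): code that legitimately keeps
 * a CPU busy for a long time is expected to pet the detector through the
 * generic touch_nmi_watchdog() wrapper in <linux/nmi.h>, which ends up
 * here. Assuming a hypothetical hw_op_done() condition:
 *
 *	while (!hw_op_done()) {
 *		cpu_relax();
 *		touch_nmi_watchdog();
 *	}
 */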
-void __weak watchdog_nmi_disable(unsigned int cpu)
+void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{
- hardlockup_detector_perf_disable();
+ per_cpu(watchdog_hardlockup_touched, cpu) = true;
+}
+
+static bool is_hardlockup(unsigned int cpu)
+{
+ int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
+
+ if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+ return true;
+
+ /*
+ * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
+ * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
+ * written/read by a single CPU.
+ */
+ per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+
+ return false;
+}
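/*
 * Worked example (not from this patch, assuming the default
 * watchdog_thresh = 10): the softlockup threshold is 2 * watchdog_thresh
 * = 20s and the per-CPU hrtimer fires every 20/5 = 4s, so a hardlockup
 * check running on a ~10s period should see hrtimer_interrupts advance
 * by 2-3 between checks. A stale count therefore means the CPU has not
 * serviced its timer interrupt for a whole check period.
 */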
+
+static void watchdog_hardlockup_kick(void)
+{
+ int new_interrupts;
+
+ new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
+ watchdog_buddy_check_hardlockup(new_interrupts);
+}
+
+void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
+{
+ if (per_cpu(watchdog_hardlockup_touched, cpu)) {
+ per_cpu(watchdog_hardlockup_touched, cpu) = false;
+ return;
+ }
+
+ /*
+ * Check for a hardlockup by making sure the CPU's timer
+ * interrupt is incrementing. The timer interrupt should have
+ * fired multiple times before this check ran. If it hasn't,
+ * that is a good indication the CPU is stuck.
+ */
+ if (is_hardlockup(cpu)) {
+ unsigned int this_cpu = smp_processor_id();
+ unsigned long flags;
+
+ /* Only print hardlockups once. */
+ if (per_cpu(watchdog_hardlockup_warned, cpu))
+ return;
+
+ /*
+ * Prevent multiple hard-lockup reports if one CPU is already
+ * engaged in dumping all-CPU backtraces.
+ */
+ if (sysctl_hardlockup_all_cpu_backtrace) {
+ if (test_and_set_bit_lock(0, &hard_lockup_nmi_warn))
+ return;
+ }
+
+ /*
+ * NOTE: we call printk_cpu_sync_get_irqsave() after printing
+ * the lockup message. While it would be nice to serialize
+ * that printout, we really want to make sure that if some
+ * other CPU somehow locked up while holding the lock associated
+ * with printk_cpu_sync_get_irqsave() that we can still at least
+ * get the message about the lockup out.
+ */
+ pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
+ printk_cpu_sync_get_irqsave(flags);
+
+ print_modules();
+ print_irqtrace_events(current);
+ if (cpu == this_cpu) {
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+ printk_cpu_sync_put_irqrestore(flags);
+ } else {
+ printk_cpu_sync_put_irqrestore(flags);
+ trigger_single_cpu_backtrace(cpu);
+ }
+
+ if (sysctl_hardlockup_all_cpu_backtrace) {
+ trigger_allbutcpu_cpu_backtrace(cpu);
+ if (!hardlockup_panic)
+ clear_bit_unlock(0, &hard_lockup_nmi_warn);
+ }
+
+ if (hardlockup_panic)
+ nmi_panic(regs, "Hard LOCKUP");
+
+ per_cpu(watchdog_hardlockup_warned, cpu) = true;
+ } else {
+ per_cpu(watchdog_hardlockup_warned, cpu) = false;
+ }
}
-/* Return 0, if a NMI watchdog is available. Error code otherwise */
-int __weak __init watchdog_nmi_probe(void)
+#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
+
+static inline void watchdog_hardlockup_kick(void) { }
+
+#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */
+
+/*
+ * These functions can be overridden based on the configured hardlockup detector.
+ *
+ * watchdog_hardlockup_enable/disable can be implemented to start and stop
+ * when the softlockup watchdog starts and stops. The detector must select
+ * the SOFTLOCKUP_DETECTOR Kconfig.
+ */
+void __weak watchdog_hardlockup_enable(unsigned int cpu) { }
+
+void __weak watchdog_hardlockup_disable(unsigned int cpu) { }
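/*
 * Illustrative sketch (not from this patch): a specific detector
 * overrides these weak hooks, much like the removed watchdog_nmi_enable()
 * and watchdog_nmi_disable() wrapped the perf implementation:
 *
 *	void watchdog_hardlockup_enable(unsigned int cpu)
 *	{
 *		hardlockup_detector_perf_enable();
 *	}
 *
 *	void watchdog_hardlockup_disable(unsigned int cpu)
 *	{
 *		hardlockup_detector_perf_disable();
 *	}
 */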
+
+/*
+ * Watchdog-detector specific API.
+ *
+ * Return 0 when the hardlockup watchdog is available, a negative value
+ * otherwise. Note that a negative value is not necessarily final: a
+ * delayed probe might still succeed later.
+ */
+int __weak __init watchdog_hardlockup_probe(void)
{
- return hardlockup_detector_perf_init();
+ return -ENODEV;
}
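/*
 * Illustrative sketch (not from this patch): a perf-based detector can
 * supply a non-weak probe, mirroring the removed watchdog_nmi_probe():
 *
 *	int __init watchdog_hardlockup_probe(void)
 *	{
 *		return hardlockup_detector_perf_init();
 *	}
 *
 * A negative return value here feeds the delayed-init retry machinery
 * added near the end of this patch.
 */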
/**
- * watchdog_nmi_stop - Stop the watchdog for reconfiguration
+ * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
*
* The reconfiguration steps are:
- * watchdog_nmi_stop();
+ * watchdog_hardlockup_stop();
* update_variables();
- * watchdog_nmi_start();
+ * watchdog_hardlockup_start();
*/
-void __weak watchdog_nmi_stop(void) { }
+void __weak watchdog_hardlockup_stop(void) { }
/**
- * watchdog_nmi_start - Start the watchdog after reconfiguration
+ * watchdog_hardlockup_start - Start the watchdog after reconfiguration
*
- * Counterpart to watchdog_nmi_stop().
+ * Counterpart to watchdog_hardlockup_stop().
*
* The following variables have been updated in update_variables() and
* contain the currently valid configuration:
* - watchdog_thresh
* - watchdog_cpumask
*/
-void __weak watchdog_nmi_start(void) { }
+void __weak watchdog_hardlockup_start(void) { }
/**
* lockup_detector_update_enable - Update the sysctl enable bit
*
- * Caller needs to make sure that the NMI/perf watchdogs are off, so this
- * can't race with watchdog_nmi_disable().
+ * Caller needs to make sure that the hardlockup detectors are off, so this
+ * can't race with watchdog_hardlockup_disable().
*/
static void lockup_detector_update_enable(void)
{
watchdog_enabled = 0;
if (!watchdog_user_enabled)
return;
- if (nmi_watchdog_available && nmi_watchdog_user_enabled)
- watchdog_enabled |= NMI_WATCHDOG_ENABLED;
- if (soft_watchdog_user_enabled)
- watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
+ if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
+ watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
+ if (watchdog_softlockup_user_enabled)
+ watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
}
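/*
 * Illustrative example (not from this patch): with both detectors
 * available and user-enabled, watchdog_enabled ends up as
 * WATCHDOG_HARDLOCKUP_ENABLED | WATCHDOG_SOFTOCKUP_ENABLED; clearing
 * watchdog_user_enabled (e.g. via /proc/sys/kernel/watchdog) leaves it 0
 * regardless of the per-detector flags.
 */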
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
-static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
-static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
+static int __init softlockup_panic_setup(char *str)
+{
+ softlockup_panic = simple_strtoul(str, NULL, 0);
+ return 1;
+}
+__setup("softlockup_panic=", softlockup_panic_setup);
+
static int __init nowatchdog_setup(char *str)
{
watchdog_user_enabled = 0;
static int __init nosoftlockup_setup(char *str)
{
- soft_watchdog_user_enabled = 0;
+ watchdog_softlockup_user_enabled = 0;
return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
unsigned long period_ts,
unsigned long now)
{
- if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
+ if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
/* Warn about unreasonable delays. */
if (time_after(now, period_ts + get_softlockup_thresh()))
return now - touch_ts;
}
/* watchdog detector functions */
-bool is_hardlockup(void)
-{
- unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
-
- if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
- return true;
-
- __this_cpu_write(hrtimer_interrupts_saved, hrint);
- return false;
-}
-
-static void watchdog_interrupt_count(void)
-{
- __this_cpu_inc(hrtimer_interrupts);
-}
-
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
struct pt_regs *regs = get_irq_regs();
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
+ unsigned long flags;
if (!watchdog_enabled)
return HRTIMER_NORESTART;
- /* kick the hardlockup detector */
- watchdog_interrupt_count();
+ watchdog_hardlockup_kick();
/* kick the softlockup detector */
if (completion_done(this_cpu_ptr(&softlockup_completion))) {
/* Start period for the next softlockup warning. */
update_report_ts();
+ printk_cpu_sync_get_irqsave(flags);
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
show_regs(regs);
else
dump_stack();
+ printk_cpu_sync_put_irqrestore(flags);
if (softlockup_all_cpu_backtrace) {
- trigger_allbutself_cpu_backtrace();
- clear_bit_unlock(0, &soft_lockup_nmi_warn);
+ trigger_allbutcpu_cpu_backtrace(smp_processor_id());
+ if (!softlockup_panic)
+ clear_bit_unlock(0, &soft_lockup_nmi_warn);
}
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
complete(done);
/*
- * Start the timer first to prevent the NMI watchdog triggering
+ * Start the timer first to prevent the hardlockup watchdog triggering
* before the timer has a chance to fire.
*/
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
/* Initialize timestamp */
update_touch_ts();
- /* Enable the perf event */
- if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
- watchdog_nmi_enable(cpu);
+ /* Enable the hardlockup detector */
+ if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
+ watchdog_hardlockup_enable(cpu);
}
static void watchdog_disable(unsigned int cpu)
WARN_ON_ONCE(cpu != smp_processor_id());
/*
- * Disable the perf event first. That prevents that a large delay
- * between disabling the timer and disabling the perf event causes
- * the perf NMI to detect a false positive.
+ * Disable the hardlockup detector first. That prevents that a large
+ * delay between disabling the timer and disabling the hardlockup
+ * detector causes a false positive.
*/
- watchdog_nmi_disable(cpu);
+ watchdog_hardlockup_disable(cpu);
hrtimer_cancel(hrtimer);
wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
return 0;
}
-static void lockup_detector_reconfigure(void)
+static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
- watchdog_nmi_stop();
+ watchdog_hardlockup_stop();
softlockup_stop_all();
set_sample_period();
if (watchdog_enabled && watchdog_thresh)
softlockup_start_all();
- watchdog_nmi_start();
+ watchdog_hardlockup_start();
cpus_read_unlock();
/*
* Must be called outside the cpus locked section to prevent
__lockup_detector_cleanup();
}
+void lockup_detector_reconfigure(void)
+{
+ mutex_lock(&watchdog_mutex);
+ __lockup_detector_reconfigure();
+ mutex_unlock(&watchdog_mutex);
+}
+
/*
* Create the watchdog infrastructure and configure the detector(s).
*/
return;
mutex_lock(&watchdog_mutex);
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
softlockup_initialized = true;
mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
-static void lockup_detector_reconfigure(void)
+static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
- watchdog_nmi_stop();
+ watchdog_hardlockup_stop();
lockup_detector_update_enable();
- watchdog_nmi_start();
+ watchdog_hardlockup_start();
cpus_read_unlock();
}
+void lockup_detector_reconfigure(void)
+{
+ __lockup_detector_reconfigure();
+}
static inline void lockup_detector_setup(void)
{
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
{
/* Remove impossible cpus to keep sysctl output clean. */
cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
}
/*
 * common function for the watchdog, nmi_watchdog and soft_watchdog parameters
*
- * caller | table->data points to | 'which'
- * -------------------|----------------------------|--------------------------
- * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED |
- * | | SOFT_WATCHDOG_ENABLED
- * -------------------|----------------------------|--------------------------
- * proc_nmi_watchdog | nmi_watchdog_user_enabled | NMI_WATCHDOG_ENABLED
- * -------------------|----------------------------|--------------------------
- * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
+ * caller | table->data points to | 'which'
+ * -------------------|----------------------------------|-------------------------------
+ * proc_watchdog | watchdog_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED |
+ * | | WATCHDOG_SOFTOCKUP_ENABLED
+ * -------------------|----------------------------------|-------------------------------
+ * proc_nmi_watchdog | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
+ * -------------------|----------------------------------|-------------------------------
+ * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
*/
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
/*
* /proc/sys/kernel/watchdog
*/
-int proc_watchdog(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_watchdog(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
+ return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
+ WATCHDOG_SOFTOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
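/*
 * Illustrative example (not from this patch), per the mapping table above:
 *
 *	echo 0 > /proc/sys/kernel/watchdog	- disable both detectors
 *	echo 0 > /proc/sys/kernel/nmi_watchdog	- disable hardlockup only
 *	echo 1 > /proc/sys/kernel/soft_watchdog	- enable softlockup only
 */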
/*
* /proc/sys/kernel/nmi_watchdog
*/
-int proc_nmi_watchdog(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_nmi_watchdog(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
{
- if (!nmi_watchdog_available && write)
+ if (!watchdog_hardlockup_available && write)
return -ENOTSUPP;
- return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
+ return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
/*
* /proc/sys/kernel/soft_watchdog
*/
-int proc_soft_watchdog(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_soft_watchdog(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
+ return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
+#endif
/*
* /proc/sys/kernel/watchdog_thresh
*/
-int proc_watchdog_thresh(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_watchdog_thresh(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int err, old;
* user to specify a mask that will include cpus that have not yet
* been brought online, if desired.
*/
-int proc_watchdog_cpumask(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_watchdog_cpumask(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int err;
.extra1 = SYSCTL_ZERO,
.extra2 = (void *)&sixty,
},
- {
- .procname = "nmi_watchdog",
- .data = &nmi_watchdog_user_enabled,
- .maxlen = sizeof(int),
- .mode = NMI_WATCHDOG_SYSCTL_PERM,
- .proc_handler = proc_nmi_watchdog,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
{
.procname = "watchdog_cpumask",
.data = &watchdog_cpumask_bits,
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
{
.procname = "soft_watchdog",
- .data = &soft_watchdog_user_enabled,
+ .data = &watchdog_softlockup_user_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_soft_watchdog,
{}
};
+static struct ctl_table watchdog_hardlockup_sysctl[] = {
+ {
+ .procname = "nmi_watchdog",
+ .data = &watchdog_hardlockup_user_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_nmi_watchdog,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {}
+};
+
static void __init watchdog_sysctl_init(void)
{
register_sysctl_init("kernel", watchdog_sysctls);
+
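+	/*
+	 * The nmi_watchdog entry is registered read-only (0444) unless a
+	 * hardlockup detector actually probed successfully.
+	 */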
+ if (watchdog_hardlockup_available)
+ watchdog_hardlockup_sysctl[0].mode = 0644;
+ register_sysctl_init("kernel", watchdog_hardlockup_sysctl);
}
+
#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
+static void __init lockup_detector_delay_init(struct work_struct *work);
+static bool allow_lockup_detector_init_retry __initdata;
+
+static struct work_struct detector_work __initdata =
+ __WORK_INITIALIZER(detector_work, lockup_detector_delay_init);
+
+static void __init lockup_detector_delay_init(struct work_struct *work)
+{
+ int ret;
+
+ ret = watchdog_hardlockup_probe();
+ if (ret) {
+ pr_info("Delayed init of the lockup detector failed: %d\n", ret);
+ pr_info("Hard watchdog permanently disabled\n");
+ return;
+ }
+
+ allow_lockup_detector_init_retry = false;
+
+ watchdog_hardlockup_available = true;
+ lockup_detector_setup();
+}
+
+/*
+ * lockup_detector_retry_init - retry initializing the lockup detector if possible
+ *
+ * Retry the hardlockup detector init. This is useful when the detector
+ * requires functionality that only becomes available later in boot on a
+ * particular platform.
+ */
+void __init lockup_detector_retry_init(void)
+{
+ /* Must be called before late init calls */
+ if (!allow_lockup_detector_init_retry)
+ return;
+
+ schedule_work(&detector_work);
+}
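/*
 * Illustrative sketch (not from this patch): a platform whose PMU only
 * becomes usable after the early probe failed can retry from its own
 * init path. Assuming a hypothetical pmu_ready() helper:
 *
 *	static int __init example_pmu_init(void)
 *	{
 *		if (pmu_ready())
 *			lockup_detector_retry_init();
 *		return 0;
 *	}
 */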
+
+/*
+ * Ensure that the optional delayed hardlockup init has completed before
+ * the init code and memory are freed.
+ */
+static int __init lockup_detector_check(void)
+{
+ /* Prevent any later retry. */
+ allow_lockup_detector_init_retry = false;
+
+ /* Make sure no work is pending. */
+ flush_work(&detector_work);
+
+ watchdog_sysctl_init();
+
+ return 0;
+}
+late_initcall_sync(lockup_detector_check);
+
void __init lockup_detector_init(void)
{
if (tick_nohz_full_enabled())
cpumask_copy(&watchdog_cpumask,
housekeeping_cpumask(HK_TYPE_TIMER));
- if (!watchdog_nmi_probe())
- nmi_watchdog_available = true;
+ if (!watchdog_hardlockup_probe())
+ watchdog_hardlockup_available = true;
+ else
+ allow_lockup_detector_init_retry = true;
+
lockup_detector_setup();
- watchdog_sysctl_init();
}