Merge branch 'rework/kthreads' into for-linus
author Petr Mladek <pmladek@suse.com>
Thu, 23 Jun 2022 17:11:28 +0000 (19:11 +0200)
committer Petr Mladek <pmladek@suse.com>
Thu, 23 Jun 2022 17:11:28 +0000 (19:11 +0200)
1  2 
drivers/tty/sysrq.c
include/linux/printk.h
kernel/hung_task.c
kernel/panic.c
kernel/reboot.c
kernel/watchdog.c

diff --combined drivers/tty/sysrq.c
@@@ -232,10 -232,8 +232,10 @@@ static void showacpu(void *dummy
        unsigned long flags;
  
        /* Idle CPUs have no interesting backtrace. */
 -      if (idle_cpu(smp_processor_id()))
 +      if (idle_cpu(smp_processor_id())) {
 +              pr_info("CPU%d: backtrace skipped as idling\n", smp_processor_id());
                return;
 +      }
  
        raw_spin_lock_irqsave(&show_lock, flags);
        pr_info("CPU%d:\n", smp_processor_id());
@@@ -262,13 -260,10 +262,13 @@@ static void sysrq_handle_showallcpus(in
  
                if (in_hardirq())
                        regs = get_irq_regs();
 -              if (regs) {
 -                      pr_info("CPU%d:\n", smp_processor_id());
 +
 +              pr_info("CPU%d:\n", smp_processor_id());
 +              if (regs)
                        show_regs(regs);
 -              }
 +              else
 +                      show_stack(NULL, NULL, KERN_INFO);
 +
                schedule_work(&sysrq_showallcpus);
        }
  }
@@@ -279,8 -274,6 +279,8 @@@ static const struct sysrq_key_op sysrq_
        .action_msg     = "Show backtrace of all active CPUs",
        .enable_mask    = SYSRQ_ENABLE_DUMP,
  };
 +#else
 +#define sysrq_showallcpus_op (*(const struct sysrq_key_op *)NULL)
  #endif
  
  static void sysrq_handle_showregs(int key)
@@@ -412,7 -405,6 +412,7 @@@ static const struct sysrq_key_op sysrq_
        .enable_mask    = SYSRQ_ENABLE_SIGNAL,
  };
  
 +#ifdef CONFIG_BLOCK
  static void sysrq_handle_thaw(int key)
  {
        emergency_thaw_all();
@@@ -423,9 -415,6 +423,9 @@@ static const struct sysrq_key_op sysrq_
        .action_msg     = "Emergency Thaw of all frozen filesystems",
        .enable_mask    = SYSRQ_ENABLE_SIGNAL,
  };
 +#else
 +#define sysrq_thaw_op (*(const struct sysrq_key_op *)NULL)
 +#endif
  
  static void sysrq_handle_kill(int key)
  {
@@@ -479,9 -468,17 +479,9 @@@ static const struct sysrq_key_op *sysrq
        NULL,                           /* g */
        NULL,                           /* h - reserved for help */
        &sysrq_kill_op,                 /* i */
 -#ifdef CONFIG_BLOCK
        &sysrq_thaw_op,                 /* j */
 -#else
 -      NULL,                           /* j */
 -#endif
        &sysrq_SAK_op,                  /* k */
 -#ifdef CONFIG_SMP
        &sysrq_showallcpus_op,          /* l */
 -#else
 -      NULL,                           /* l */
 -#endif
        &sysrq_showmem_op,              /* m */
        &sysrq_unrt_op,                 /* n */
        /* o: This will often be registered as 'Off' at init time */
@@@ -581,7 -578,6 +581,6 @@@ void __handle_sysrq(int key, bool check
  
        rcu_sysrq_start();
        rcu_read_lock();
-       printk_prefer_direct_enter();
        /*
         * Raise the apparent loglevel to maximum so that the sysrq header
         * is shown to provide the user with positive feedback.  We do not
                pr_cont("\n");
                console_loglevel = orig_log_level;
        }
-       printk_prefer_direct_exit();
        rcu_read_unlock();
        rcu_sysrq_end();
  
diff --combined include/linux/printk.h
@@@ -6,6 -6,7 +6,6 @@@
  #include <linux/init.h>
  #include <linux/kern_levels.h>
  #include <linux/linkage.h>
 -#include <linux/cache.h>
  #include <linux/ratelimit_types.h>
  #include <linux/once_lite.h>
  
@@@ -169,11 -170,7 +169,7 @@@ extern void __printk_safe_exit(void)
  #define printk_deferred_enter __printk_safe_enter
  #define printk_deferred_exit __printk_safe_exit
  
- extern void printk_prefer_direct_enter(void);
- extern void printk_prefer_direct_exit(void);
  extern bool pr_flush(int timeout_ms, bool reset_on_progress);
- extern void try_block_console_kthreads(int timeout_ms);
  
  /*
   * Please don't use printk_ratelimit(), because it shares ratelimiting state
@@@ -225,23 -222,11 +221,11 @@@ static inline void printk_deferred_exit
  {
  }
  
- static inline void printk_prefer_direct_enter(void)
- {
- }
- static inline void printk_prefer_direct_exit(void)
- {
- }
  static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
  {
        return true;
  }
  
- static inline void try_block_console_kthreads(int timeout_ms)
- {
- }
  static inline int printk_ratelimit(void)
  {
        return 0;
diff --combined kernel/hung_task.c
@@@ -73,7 -73,7 +73,7 @@@ static unsigned int __read_mostly sysct
   * hung task is detected:
   */
  unsigned int __read_mostly sysctl_hung_task_panic =
 -                              CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;
 +                              IS_ENABLED(CONFIG_BOOTPARAM_HUNG_TASK_PANIC);
  
  static int
  hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@@ -127,8 -127,6 +127,6 @@@ static void check_hung_task(struct task
         * complain:
         */
        if (sysctl_hung_task_warnings) {
-               printk_prefer_direct_enter();
                if (sysctl_hung_task_warnings > 0)
                        sysctl_hung_task_warnings--;
                pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
  
                if (sysctl_hung_task_all_cpu_backtrace)
                        hung_task_show_all_bt = true;
-               printk_prefer_direct_exit();
        }
  
        touch_nmi_watchdog();
@@@ -208,17 -204,12 +204,12 @@@ static void check_hung_uninterruptible_
        }
   unlock:
        rcu_read_unlock();
-       if (hung_task_show_lock) {
-               printk_prefer_direct_enter();
+       if (hung_task_show_lock)
                debug_show_all_locks();
-               printk_prefer_direct_exit();
-       }
  
        if (hung_task_show_all_bt) {
                hung_task_show_all_bt = false;
-               printk_prefer_direct_enter();
                trigger_all_cpu_backtrace();
-               printk_prefer_direct_exit();
        }
  
        if (hung_task_call_panic)
diff --combined kernel/panic.c
@@@ -43,9 -43,7 +43,9 @@@
   * Should we dump all CPUs backtraces in an oops event?
   * Defaults to 0, can be changed via sysctl.
   */
 -unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
 +static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
 +#else
 +#define sysctl_oops_all_cpu_backtrace 0
  #endif /* CONFIG_SMP */
  
  int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
@@@ -75,28 -73,6 +75,28 @@@ ATOMIC_NOTIFIER_HEAD(panic_notifier_lis
  
  EXPORT_SYMBOL(panic_notifier_list);
  
 +#if defined(CONFIG_SMP) && defined(CONFIG_SYSCTL)
 +static struct ctl_table kern_panic_table[] = {
 +      {
 +              .procname       = "oops_all_cpu_backtrace",
 +              .data           = &sysctl_oops_all_cpu_backtrace,
 +              .maxlen         = sizeof(int),
 +              .mode           = 0644,
 +              .proc_handler   = proc_dointvec_minmax,
 +              .extra1         = SYSCTL_ZERO,
 +              .extra2         = SYSCTL_ONE,
 +      },
 +      { }
 +};
 +
 +static __init int kernel_panic_sysctls_init(void)
 +{
 +      register_sysctl_init("kernel", kern_panic_table);
 +      return 0;
 +}
 +late_initcall(kernel_panic_sysctls_init);
 +#endif
 +
  static long no_blink(int state)
  {
        return 0;
@@@ -297,7 -273,6 +297,6 @@@ void panic(const char *fmt, ...
                 * unfortunately means it may not be hardened to work in a
                 * panic situation.
                 */
-               try_block_console_kthreads(10000);
                smp_send_stop();
        } else {
                /*
                 * kmsg_dump, we will need architecture dependent extra
                 * works in addition to stopping other CPUs.
                 */
-               try_block_console_kthreads(10000);
                crash_smp_send_stop();
        }
  
@@@ -605,8 -579,6 +603,6 @@@ void __warn(const char *file, int line
  {
        disable_trace_on_warning();
  
-       printk_prefer_direct_enter();
        if (file)
                pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
                        raw_smp_processor_id(), current->pid, file, line,
  
        /* Just a warning, don't kill lockdep. */
        add_taint(taint, LOCKDEP_STILL_OK);
-       printk_prefer_direct_exit();
  }
  
  #ifndef __WARN_FLAGS
diff --combined kernel/reboot.c
@@@ -23,7 -23,7 +23,7 @@@
   * this indicates whether you can reboot with ctrl-alt-del: the default is yes
   */
  
 -int C_A_D = 1;
 +static int C_A_D = 1;
  struct pid *cad_pid;
  EXPORT_SYMBOL(cad_pid);
  
@@@ -48,20 -48,12 +48,20 @@@ int reboot_cpu
  enum reboot_type reboot_type = BOOT_ACPI;
  int reboot_force;
  
 +struct sys_off_handler {
 +      struct notifier_block nb;
 +      int (*sys_off_cb)(struct sys_off_data *data);
 +      void *cb_data;
 +      enum sys_off_mode mode;
 +      bool blocking;
 +      void *list;
 +};
 +
  /*
 - * If set, this is used for preparing the system to power off.
 + * Temporary stub that prevents linkage failure while we're in process
 + * of removing all uses of legacy pm_power_off() around the kernel.
   */
 -
 -void (*pm_power_off_prepare)(void);
 -EXPORT_SYMBOL_GPL(pm_power_off_prepare);
 +void __weak (*pm_power_off)(void);
  
  /**
   *    emergency_restart - reboot the system
@@@ -82,7 -74,6 +82,6 @@@ void kernel_restart_prepare(char *cmd
  {
        blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
        system_state = SYSTEM_RESTART;
-       try_block_console_kthreads(10000);
        usermodehelper_disable();
        device_shutdown();
  }
@@@ -271,7 -262,6 +270,6 @@@ static void kernel_shutdown_prepare(enu
        blocking_notifier_call_chain(&reboot_notifier_list,
                (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
        system_state = state;
-       try_block_console_kthreads(10000);
        usermodehelper_disable();
        device_shutdown();
  }
@@@ -291,370 -281,6 +289,370 @@@ void kernel_halt(void
  }
  EXPORT_SYMBOL_GPL(kernel_halt);
  
 +/*
 + *    Notifier list for kernel code which wants to be called
 + *    to prepare system for power off.
 + */
 +static BLOCKING_NOTIFIER_HEAD(power_off_prep_handler_list);
 +
 +/*
 + *    Notifier list for kernel code which wants to be called
 + *    to power off system.
 + */
 +static ATOMIC_NOTIFIER_HEAD(power_off_handler_list);
 +
 +static int sys_off_notify(struct notifier_block *nb,
 +                        unsigned long mode, void *cmd)
 +{
 +      struct sys_off_handler *handler;
 +      struct sys_off_data data = {};
 +
 +      handler = container_of(nb, struct sys_off_handler, nb);
 +      data.cb_data = handler->cb_data;
 +      data.mode = mode;
 +      data.cmd = cmd;
 +
 +      return handler->sys_off_cb(&data);
 +}
 +
 +static struct sys_off_handler platform_sys_off_handler;
 +
 +static struct sys_off_handler *alloc_sys_off_handler(int priority)
 +{
 +      struct sys_off_handler *handler;
 +      gfp_t flags;
 +
 +      /*
 +       * Platforms like m68k can't allocate sys_off handler dynamically
 +       * at the early boot time because memory allocator isn't available yet.
 +       */
 +      if (priority == SYS_OFF_PRIO_PLATFORM) {
 +              handler = &platform_sys_off_handler;
 +              if (handler->cb_data)
 +                      return ERR_PTR(-EBUSY);
 +      } else {
 +              if (system_state > SYSTEM_RUNNING)
 +                      flags = GFP_ATOMIC;
 +              else
 +                      flags = GFP_KERNEL;
 +
 +              handler = kzalloc(sizeof(*handler), flags);
 +              if (!handler)
 +                      return ERR_PTR(-ENOMEM);
 +      }
 +
 +      return handler;
 +}
 +
 +static void free_sys_off_handler(struct sys_off_handler *handler)
 +{
 +      if (handler == &platform_sys_off_handler)
 +              memset(handler, 0, sizeof(*handler));
 +      else
 +              kfree(handler);
 +}
 +
 +/**
 + *    register_sys_off_handler - Register sys-off handler
 + *    @mode: Sys-off mode
 + *    @priority: Handler priority
 + *    @callback: Callback function
 + *    @cb_data: Callback argument
 + *
 + *    Registers system power-off or restart handler that will be invoked
 + *    at the step corresponding to the given sys-off mode. Handler's callback
 + *    should return NOTIFY_DONE to permit execution of the next handler in
 + *    the call chain or NOTIFY_STOP to break the chain (in error case for
 + *    example).
 + *
 + *    Multiple handlers can be registered at the default priority level.
 + *
 + *    Only one handler can be registered at the non-default priority level,
 + *    otherwise ERR_PTR(-EBUSY) is returned.
 + *
 + *    Returns a new instance of struct sys_off_handler on success, or
 + *    an ERR_PTR()-encoded error code otherwise.
 + */
 +struct sys_off_handler *
 +register_sys_off_handler(enum sys_off_mode mode,
 +                       int priority,
 +                       int (*callback)(struct sys_off_data *data),
 +                       void *cb_data)
 +{
 +      struct sys_off_handler *handler;
 +      int err;
 +
 +      handler = alloc_sys_off_handler(priority);
 +      if (IS_ERR(handler))
 +              return handler;
 +
 +      switch (mode) {
 +      case SYS_OFF_MODE_POWER_OFF_PREPARE:
 +              handler->list = &power_off_prep_handler_list;
 +              handler->blocking = true;
 +              break;
 +
 +      case SYS_OFF_MODE_POWER_OFF:
 +              handler->list = &power_off_handler_list;
 +              break;
 +
 +      case SYS_OFF_MODE_RESTART:
 +              handler->list = &restart_handler_list;
 +              break;
 +
 +      default:
 +              free_sys_off_handler(handler);
 +              return ERR_PTR(-EINVAL);
 +      }
 +
 +      handler->nb.notifier_call = sys_off_notify;
 +      handler->nb.priority = priority;
 +      handler->sys_off_cb = callback;
 +      handler->cb_data = cb_data;
 +      handler->mode = mode;
 +
 +      if (handler->blocking) {
 +              if (priority == SYS_OFF_PRIO_DEFAULT)
 +                      err = blocking_notifier_chain_register(handler->list,
 +                                                             &handler->nb);
 +              else
 +                      err = blocking_notifier_chain_register_unique_prio(handler->list,
 +                                                                         &handler->nb);
 +      } else {
 +              if (priority == SYS_OFF_PRIO_DEFAULT)
 +                      err = atomic_notifier_chain_register(handler->list,
 +                                                           &handler->nb);
 +              else
 +                      err = atomic_notifier_chain_register_unique_prio(handler->list,
 +                                                                       &handler->nb);
 +      }
 +
 +      if (err) {
 +              free_sys_off_handler(handler);
 +              return ERR_PTR(err);
 +      }
 +
 +      return handler;
 +}
 +EXPORT_SYMBOL_GPL(register_sys_off_handler);
 +
 +/**
 + *    unregister_sys_off_handler - Unregister sys-off handler
 + *    @handler: Sys-off handler
 + *
 + *    Unregisters given sys-off handler.
 + */
 +void unregister_sys_off_handler(struct sys_off_handler *handler)
 +{
 +      int err;
 +
 +      if (IS_ERR_OR_NULL(handler))
 +              return;
 +
 +      if (handler->blocking)
 +              err = blocking_notifier_chain_unregister(handler->list,
 +                                                       &handler->nb);
 +      else
 +              err = atomic_notifier_chain_unregister(handler->list,
 +                                                     &handler->nb);
 +
 +      /* sanity check, shall never happen */
 +      WARN_ON(err);
 +
 +      free_sys_off_handler(handler);
 +}
 +EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
 +
 +static void devm_unregister_sys_off_handler(void *data)
 +{
 +      struct sys_off_handler *handler = data;
 +
 +      unregister_sys_off_handler(handler);
 +}
 +
 +/**
 + *    devm_register_sys_off_handler - Register sys-off handler
 + *    @dev: Device that registers handler
 + *    @mode: Sys-off mode
 + *    @priority: Handler priority
 + *    @callback: Callback function
 + *    @cb_data: Callback argument
 + *
 + *    Registers resource-managed sys-off handler.
 + *
 + *    Returns zero on success, or error code on failure.
 + */
 +int devm_register_sys_off_handler(struct device *dev,
 +                                enum sys_off_mode mode,
 +                                int priority,
 +                                int (*callback)(struct sys_off_data *data),
 +                                void *cb_data)
 +{
 +      struct sys_off_handler *handler;
 +
 +      handler = register_sys_off_handler(mode, priority, callback, cb_data);
 +      if (IS_ERR(handler))
 +              return PTR_ERR(handler);
 +
 +      return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler,
 +                                      handler);
 +}
 +EXPORT_SYMBOL_GPL(devm_register_sys_off_handler);
 +
 +/**
 + *    devm_register_power_off_handler - Register power-off handler
 + *    @dev: Device that registers callback
 + *    @callback: Callback function
 + *    @cb_data: Callback's argument
 + *
 + *    Registers resource-managed sys-off handler with a default priority
 + *    and using power-off mode.
 + *
 + *    Returns zero on success, or error code on failure.
 + */
 +int devm_register_power_off_handler(struct device *dev,
 +                                  int (*callback)(struct sys_off_data *data),
 +                                  void *cb_data)
 +{
 +      return devm_register_sys_off_handler(dev,
 +                                           SYS_OFF_MODE_POWER_OFF,
 +                                           SYS_OFF_PRIO_DEFAULT,
 +                                           callback, cb_data);
 +}
 +EXPORT_SYMBOL_GPL(devm_register_power_off_handler);
 +
 +/**
 + *    devm_register_restart_handler - Register restart handler
 + *    @dev: Device that registers callback
 + *    @callback: Callback function
 + *    @cb_data: Callback's argument
 + *
 + *    Registers resource-managed sys-off handler with a default priority
 + *    and using restart mode.
 + *
 + *    Returns zero on success, or error code on failure.
 + */
 +int devm_register_restart_handler(struct device *dev,
 +                                int (*callback)(struct sys_off_data *data),
 +                                void *cb_data)
 +{
 +      return devm_register_sys_off_handler(dev,
 +                                           SYS_OFF_MODE_RESTART,
 +                                           SYS_OFF_PRIO_DEFAULT,
 +                                           callback, cb_data);
 +}
 +EXPORT_SYMBOL_GPL(devm_register_restart_handler);
 +
 +static struct sys_off_handler *platform_power_off_handler;
 +
 +static int platform_power_off_notify(struct sys_off_data *data)
 +{
 +      void (*platform_power_power_off_cb)(void) = data->cb_data;
 +
 +      platform_power_power_off_cb();
 +
 +      return NOTIFY_DONE;
 +}
 +
 +/**
 + *    register_platform_power_off - Register platform-level power-off callback
 + *    @power_off: Power-off callback
 + *
 + *    Registers power-off callback that will be called as last step
 + *    of the power-off sequence. This callback is expected to be invoked
 + *    for the last resort. Only one platform power-off callback is allowed
 + *    to be registered at a time.
 + *
 + *    Returns zero on success, or error code on failure.
 + */
 +int register_platform_power_off(void (*power_off)(void))
 +{
 +      struct sys_off_handler *handler;
 +
 +      handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
 +                                         SYS_OFF_PRIO_PLATFORM,
 +                                         platform_power_off_notify,
 +                                         power_off);
 +      if (IS_ERR(handler))
 +              return PTR_ERR(handler);
 +
 +      platform_power_off_handler = handler;
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(register_platform_power_off);
 +
 +/**
 + *    unregister_platform_power_off - Unregister platform-level power-off callback
 + *    @power_off: Power-off callback
 + *
 + *    Unregisters previously registered platform power-off callback.
 + */
 +void unregister_platform_power_off(void (*power_off)(void))
 +{
 +      if (platform_power_off_handler &&
 +          platform_power_off_handler->cb_data == power_off) {
 +              unregister_sys_off_handler(platform_power_off_handler);
 +              platform_power_off_handler = NULL;
 +      }
 +}
 +EXPORT_SYMBOL_GPL(unregister_platform_power_off);
 +
 +static int legacy_pm_power_off(struct sys_off_data *data)
 +{
 +      if (pm_power_off)
 +              pm_power_off();
 +
 +      return NOTIFY_DONE;
 +}
 +
 +static void do_kernel_power_off_prepare(void)
 +{
 +      blocking_notifier_call_chain(&power_off_prep_handler_list, 0, NULL);
 +}
 +
 +/**
 + *    do_kernel_power_off - Execute kernel power-off handler call chain
 + *
 + *    Expected to be called as last step of the power-off sequence.
 + *
 + *    Powers off the system immediately if a power-off handler function has
 + *    been registered. Otherwise does nothing.
 + */
 +void do_kernel_power_off(void)
 +{
 +      struct sys_off_handler *sys_off = NULL;
 +
 +      /*
 +       * Register sys-off handlers for legacy PM callback. This allows
 +       * legacy PM callbacks temporary co-exist with the new sys-off API.
 +       *
 +       * TODO: Remove legacy handlers once all legacy PM users will be
 +       *       switched to the sys-off based APIs.
 +       */
 +      if (pm_power_off)
 +              sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
 +                                                 SYS_OFF_PRIO_DEFAULT,
 +                                                 legacy_pm_power_off, NULL);
 +
 +      atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);
 +
 +      unregister_sys_off_handler(sys_off);
 +}
 +
 +/**
 + *    kernel_can_power_off - check whether system can be powered off
 + *
 + *    Returns true if power-off handler is registered and system can be
 + *    powered off, false otherwise.
 + */
 +bool kernel_can_power_off(void)
 +{
 +      return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) ||
 +              pm_power_off;
 +}
 +EXPORT_SYMBOL_GPL(kernel_can_power_off);
 +
  /**
   *    kernel_power_off - power_off the system
   *
  void kernel_power_off(void)
  {
        kernel_shutdown_prepare(SYSTEM_POWER_OFF);
 -      if (pm_power_off_prepare)
 -              pm_power_off_prepare();
 +      do_kernel_power_off_prepare();
        migrate_to_reboot_cpu();
        syscore_shutdown();
        pr_emerg("Power down\n");
@@@ -713,7 -340,7 +711,7 @@@ SYSCALL_DEFINE4(reboot, int, magic1, in
        /* Instead of trying to make the power_off code look like
         * halt when pm_power_off is not set do it the easy way.
         */
 -      if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
 +      if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off())
                cmd = LINUX_REBOOT_CMD_HALT;
  
        mutex_lock(&system_transition_mutex);
@@@ -790,8 -417,7 +788,8 @@@ void ctrl_alt_del(void
                kill_cad_pid(SIGINT, 1);
  }
  
 -char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
 +#define POWEROFF_CMD_PATH_LEN  256
 +static char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
  static const char reboot_cmd[] = "/sbin/reboot";
  
  static int run_cmd(const char *cmd)
@@@ -821,11 -447,9 +819,9 @@@ static int __orderly_reboot(void
        ret = run_cmd(reboot_cmd);
  
        if (ret) {
-               printk_prefer_direct_enter();
                pr_warn("Failed to start orderly reboot: forcing the issue\n");
                emergency_sync();
                kernel_restart(NULL);
-               printk_prefer_direct_exit();
        }
  
        return ret;
@@@ -838,7 -462,6 +834,6 @@@ static int __orderly_poweroff(bool forc
        ret = run_cmd(poweroff_cmd);
  
        if (ret && force) {
-               printk_prefer_direct_enter();
                pr_warn("Failed to start orderly shutdown: forcing the issue\n");
  
                /*
                 */
                emergency_sync();
                kernel_power_off();
-               printk_prefer_direct_exit();
        }
  
        return ret;
@@@ -906,8 -528,6 +900,6 @@@ EXPORT_SYMBOL_GPL(orderly_reboot)
   */
  static void hw_failure_emergency_poweroff_func(struct work_struct *work)
  {
-       printk_prefer_direct_enter();
        /*
         * We have reached here after the emergency shutdown waiting period has
         * expired. This means orderly_poweroff has not been able to shut off
         */
        pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
        emergency_restart();
-       printk_prefer_direct_exit();
  }
  
  static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
@@@ -964,13 -582,11 +954,11 @@@ void hw_protection_shutdown(const char 
  {
        static atomic_t allow_proceed = ATOMIC_INIT(1);
  
-       printk_prefer_direct_enter();
        pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
  
        /* Shutdown should be initiated only once. */
        if (!atomic_dec_and_test(&allow_proceed))
-               goto out;
+               return;
  
        /*
         * Queue a backup emergency shutdown in the event of
         */
        hw_failure_emergency_poweroff(ms_until_forced);
        orderly_poweroff(true);
- out:
-       printk_prefer_direct_exit();
  }
  EXPORT_SYMBOL_GPL(hw_protection_shutdown);
  
@@@ -1253,33 -867,6 +1239,33 @@@ static struct attribute *reboot_attrs[
        NULL,
  };
  
 +#ifdef CONFIG_SYSCTL
 +static struct ctl_table kern_reboot_table[] = {
 +      {
 +              .procname       = "poweroff_cmd",
 +              .data           = &poweroff_cmd,
 +              .maxlen         = POWEROFF_CMD_PATH_LEN,
 +              .mode           = 0644,
 +              .proc_handler   = proc_dostring,
 +      },
 +      {
 +              .procname       = "ctrl-alt-del",
 +              .data           = &C_A_D,
 +              .maxlen         = sizeof(int),
 +              .mode           = 0644,
 +              .proc_handler   = proc_dointvec,
 +      },
 +      { }
 +};
 +
 +static void __init kernel_reboot_sysctls_init(void)
 +{
 +      register_sysctl_init("kernel", kern_reboot_table);
 +}
 +#else
 +#define kernel_reboot_sysctls_init() do { } while (0)
 +#endif /* CONFIG_SYSCTL */
 +
  static const struct attribute_group reboot_attr_group = {
        .attrs = reboot_attrs,
  };
@@@ -1299,8 -886,6 +1285,8 @@@ static int __init reboot_ksysfs_init(vo
                return ret;
        }
  
 +      kernel_reboot_sysctls_init();
 +
        return 0;
  }
  late_initcall(reboot_ksysfs_init);
diff --combined kernel/watchdog.c
@@@ -57,7 -57,7 +57,7 @@@ int __read_mostly sysctl_hardlockup_all
   * Should we panic when a soft-lockup or hard-lockup occurs:
   */
  unsigned int __read_mostly hardlockup_panic =
 -                      CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
 +                      IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
  /*
   * We may not want to enable hard lockup detection by default in all cases,
   * for example when running the kernel as a guest on a hypervisor. In these
@@@ -168,7 -168,7 +168,7 @@@ static struct cpumask watchdog_allowed_
  
  /* Global variables, exported for sysctl */
  unsigned int __read_mostly softlockup_panic =
 -                      CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
 +                      IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);
  
  static bool softlockup_initialized __read_mostly;
  static u64 __read_mostly sample_period;
@@@ -424,8 -424,6 +424,6 @@@ static enum hrtimer_restart watchdog_ti
                /* Start period for the next softlockup warning. */
                update_report_ts();
  
-               printk_prefer_direct_enter();
                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
-               printk_prefer_direct_exit();
        }
  
        return HRTIMER_RESTART;