Merge tag 'driver-core-5.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-microblaze.git] / drivers / base / power / main.c
index bd5bb5a..dcfc0a3 100644 (file)
@@ -205,7 +205,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
        if (!pm_print_times_enabled)
                return 0;
 
-       dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+       dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
                 task_pid_nr(current),
                 dev->parent ? dev_name(dev->parent) : "none");
        return ktime_get();
@@ -223,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 
-       dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
+       dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
                 (unsigned long long)nsecs >> 10);
 }
 
@@ -476,7 +476,7 @@ struct dpm_watchdog {
 
 /**
  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that PM watchdog depends on.
  *
  * Called when a driver has timed out suspending or resuming.
  * There's not much we can do here to recover so panic() to
@@ -704,6 +704,19 @@ static bool is_async(struct device *dev)
                && !pm_trace_is_enabled();
 }
 
+/**
+ * dpm_async_fn - Queue a device PM callback for asynchronous execution.
+ * @dev: Device to operate on.
+ * @func: Callback to schedule for @dev.
+ *
+ * Returns true if @func has been scheduled asynchronously (a reference to
+ * @dev is taken first; NOTE(review): @func is presumably responsible for
+ * dropping it - confirm against the async callbacks), or false if the
+ * caller must invoke the synchronous path itself.
+ */
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+       reinit_completion(&dev->power.completion);
+
+       if (is_async(dev)) {
+               get_device(dev);
+               /*
+                * Use async_schedule_dev() to match the open-coded call sites
+                * this helper replaces; plain async_schedule() would silently
+                * drop the device-proximity hint they all carried.
+                */
+               async_schedule_dev(func, dev);
+               return true;
+       }
+
+       return false;
+}
+
 static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
        struct device *dev = (struct device *)data;
@@ -730,13 +743,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
         * in case the starting of async threads is
         * delayed by non-async resuming devices.
         */
-       list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
-               reinit_completion(&dev->power.completion);
-               if (is_async(dev)) {
-                       get_device(dev);
-                       async_schedule_dev(async_resume_noirq, dev);
-               }
-       }
+       list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+               dpm_async_fn(dev, async_resume_noirq);
 
        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
@@ -887,13 +895,8 @@ void dpm_resume_early(pm_message_t state)
         * in case the starting of async threads is
         * delayed by non-async resuming devices.
         */
-       list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
-               reinit_completion(&dev->power.completion);
-               if (is_async(dev)) {
-                       get_device(dev);
-                       async_schedule_dev(async_resume_early, dev);
-               }
-       }
+       list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+               dpm_async_fn(dev, async_resume_early);
 
        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
@@ -1051,13 +1054,8 @@ void dpm_resume(pm_message_t state)
        pm_transition = state;
        async_error = 0;
 
-       list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
-               reinit_completion(&dev->power.completion);
-               if (is_async(dev)) {
-                       get_device(dev);
-                       async_schedule_dev(async_resume, dev);
-               }
-       }
+       list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+               dpm_async_fn(dev, async_resume);
 
        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
@@ -1371,13 +1369,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
 
 static int device_suspend_noirq(struct device *dev)
 {
-       reinit_completion(&dev->power.completion);
-
-       if (is_async(dev)) {
-               get_device(dev);
-               async_schedule_dev(async_suspend_noirq, dev);
+       /*
+        * 0 here only means the work was handed off to async_suspend_noirq();
+        * errors from the async path are reported by that function.
+        */
+       if (dpm_async_fn(dev, async_suspend_noirq))
                return 0;
-       }
+
+       /* Otherwise suspend the device synchronously in this context. */
        return __device_suspend_noirq(dev, pm_transition, false);
 }
 
@@ -1574,13 +1568,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
 
 static int device_suspend_late(struct device *dev)
 {
-       reinit_completion(&dev->power.completion);
-
-       if (is_async(dev)) {
-               get_device(dev);
-               async_schedule_dev(async_suspend_late, dev);
+       /* Hand off to async_suspend_late() when async is enabled for @dev. */
+       if (dpm_async_fn(dev, async_suspend_late))
                return 0;
-       }
 
+       /* Synchronous fallback. */
        return __device_suspend_late(dev, pm_transition, false);
 }
@@ -1745,6 +1734,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (dev->power.syscore)
                goto Complete;
 
+       /* Avoid direct_complete to let wakeup_path propagate. */
+       if (device_may_wakeup(dev) || dev->power.wakeup_path)
+               dev->power.direct_complete = false;
+
        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
@@ -1840,13 +1833,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
 
 static int device_suspend(struct device *dev)
 {
-       reinit_completion(&dev->power.completion);
-
-       if (is_async(dev)) {
-               get_device(dev);
-               async_schedule_dev(async_suspend, dev);
+       /* Hand off to async_suspend() when async is enabled for @dev. */
+       if (dpm_async_fn(dev, async_suspend))
                return 0;
-       }
 
+       /* Synchronous fallback. */
        return __device_suspend(dev, pm_transition, false);
 }
@@ -2061,14 +2049,14 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
+/*
+ * __suspend_report_result - Log a PM callback that returned an error.
+ * @function: Name of the reporting caller (printed verbatim).
+ * @fn: Address of the failing callback.
+ * @ret: Its return value; nothing is logged when it is 0.
+ */
 void __suspend_report_result(const char *function, void *fn, int ret)
 {
        if (ret)
-               pr_err("%s(): %pF returns %d\n", function, fn, ret);
+               /* %pS resolves a plain function pointer to its symbol name;
+                * %pF (function-descriptor form) is being phased out. */
+               pr_err("%s(): %pS returns %d\n", function, fn, ret);
 }
 EXPORT_SYMBOL_GPL(__suspend_report_result);
 
 /**
  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
  * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
  */
 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
 {