PM: runtime: Drop pm_runtime_clean_up_links()
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/runtime.c - Helper functions for device runtime PM
4  *
5  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
7  */
8 #include <linux/sched/mm.h>
9 #include <linux/ktime.h>
10 #include <linux/hrtimer.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <trace/events/rpm.h>
15
16 #include "../base.h"
17 #include "power.h"
18
19 typedef int (*pm_callback_t)(struct device *);
20
21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
22 {
23         pm_callback_t cb;
24         const struct dev_pm_ops *ops;
25
26         if (dev->pm_domain)
27                 ops = &dev->pm_domain->ops;
28         else if (dev->type && dev->type->pm)
29                 ops = dev->type->pm;
30         else if (dev->class && dev->class->pm)
31                 ops = dev->class->pm;
32         else if (dev->bus && dev->bus->pm)
33                 ops = dev->bus->pm;
34         else
35                 ops = NULL;
36
37         if (ops)
38                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
39         else
40                 cb = NULL;
41
42         if (!cb && dev->driver && dev->driver->pm)
43                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
44
45         return cb;
46 }
47
48 #define RPM_GET_CALLBACK(dev, callback) \
49                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
50
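/*
 * Illustrative sketch (not part of the original file): the lookup above
 * resolves a callback in the order pm_domain -> type -> class -> bus,
 * falling back to the driver's own dev_pm_ops only if none of those
 * layers provides one.  A hypothetical driver ("foo" names are made up)
 * plugs into this as follows:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   foo_runtime_idle)
 *	};
 *
 * RPM_GET_CALLBACK(dev, runtime_suspend) then returns foo_runtime_suspend
 * only when no pm_domain/type/class/bus dev_pm_ops supplies that callback.
 */
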
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
53
54 /**
55  * update_pm_runtime_accounting - Update the time accounting of power states
56  * @dev: Device to update the accounting for
57  *
58  * In order to be able to have time accounting of the various power states
59  * (as used by programs such as PowerTOP to show the effectiveness of runtime
60  * PM), we need to track the time spent in each state.
61  * update_pm_runtime_accounting must be called each time before the
62  * runtime_status field is updated, to account the time in the old state
63  * correctly.
64  */
65 static void update_pm_runtime_accounting(struct device *dev)
66 {
67         u64 now, last, delta;
68
69         if (dev->power.disable_depth > 0)
70                 return;
71
72         last = dev->power.accounting_timestamp;
73
74         now = ktime_get_mono_fast_ns();
75         dev->power.accounting_timestamp = now;
76
77         /*
78          * Because ktime_get_mono_fast_ns() is not monotonic during
79          * timekeeping updates, ensure that 'now' is after the last saved
80          * timestamp.
81          */
82         if (now < last)
83                 return;
84
85         delta = now - last;
86
87         if (dev->power.runtime_status == RPM_SUSPENDED)
88                 dev->power.suspended_time += delta;
89         else
90                 dev->power.active_time += delta;
91 }
92
93 static void __update_runtime_status(struct device *dev, enum rpm_status status)
94 {
95         update_pm_runtime_accounting(dev);
96         dev->power.runtime_status = status;
97 }
98
99 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
100 {
101         u64 time;
102         unsigned long flags;
103
104         spin_lock_irqsave(&dev->power.lock, flags);
105
106         update_pm_runtime_accounting(dev);
107         time = suspended ? dev->power.suspended_time : dev->power.active_time;
108
109         spin_unlock_irqrestore(&dev->power.lock, flags);
110
111         return time;
112 }
113
114 u64 pm_runtime_active_time(struct device *dev)
115 {
116         return rpm_get_accounted_time(dev, false);
117 }
118
119 u64 pm_runtime_suspended_time(struct device *dev)
120 {
121         return rpm_get_accounted_time(dev, true);
122 }
123 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
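
/*
 * Usage sketch (illustrative only): a monitoring path, such as the sysfs
 * runtime_active_time attribute, can read the accounted times with these
 * helpers; they take the power lock and flush the time spent in the
 * current state into the counters before returning:
 *
 *	u64 active_ns = pm_runtime_active_time(dev);
 *	u64 suspended_ns = pm_runtime_suspended_time(dev);
 */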
124
125 /**
126  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
127  * @dev: Device to handle.
128  */
129 static void pm_runtime_deactivate_timer(struct device *dev)
130 {
131         if (dev->power.timer_expires > 0) {
132                 hrtimer_try_to_cancel(&dev->power.suspend_timer);
133                 dev->power.timer_expires = 0;
134         }
135 }
136
137 /**
138  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
139  * @dev: Device to handle.
140  */
141 static void pm_runtime_cancel_pending(struct device *dev)
142 {
143         pm_runtime_deactivate_timer(dev);
144         /*
145          * In case there's a request pending, make sure its work function will
146          * return without doing anything.
147          */
148         dev->power.request = RPM_REQ_NONE;
149 }
150
151 /**
152  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
153  * @dev: Device to handle.
154  *
155  * Compute the autosuspend-delay expiration time based on the device's
156  * power.last_busy time.  If the delay has already expired or is disabled
157  * (negative) or the power.use_autosuspend flag isn't set, return 0.
158  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
159  *
160  * This function may be called either with or without dev->power.lock held.
161  * Either way it can be racy, since power.last_busy may be updated at any time.
162  */
163 u64 pm_runtime_autosuspend_expiration(struct device *dev)
164 {
165         int autosuspend_delay;
166         u64 expires;
167
168         if (!dev->power.use_autosuspend)
169                 return 0;
170
171         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
172         if (autosuspend_delay < 0)
173                 return 0;
174
175         expires  = READ_ONCE(dev->power.last_busy);
176         expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
177         if (expires > ktime_get_mono_fast_ns())
178                 return expires; /* Expires in the future */
179
180         return 0;
181 }
182 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
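
/*
 * Worked example (illustrative): with power.autosuspend_delay set to
 * 2000 ms and power.last_busy updated at time T, the function above
 * returns T + 2000 * NSEC_PER_MSEC while that instant is still in the
 * future, and 0 afterwards (or if autosuspend is disabled):
 *
 *	pm_runtime_mark_last_busy(dev);
 *	expires = pm_runtime_autosuspend_expiration(dev);
 *	// expires == 0 means a suspend may proceed right away
 */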
183
184 static int dev_memalloc_noio(struct device *dev, void *data)
185 {
186         return dev->power.memalloc_noio;
187 }
188
189 /**
190  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
191  * @dev: Device to handle.
192  * @enable: True for setting the flag and False for clearing the flag.
193  *
194  * Set the flag for all devices in the path from the device to the
195  * root device in the device tree if @enable is true; otherwise clear
196  * the flag for each device in the path none of whose children has it set.
197  *
198  * The function should only be called by block device or network
199  * device drivers to solve the deadlock problem during runtime
200  * resume/suspend:
201  *
202  *     If memory allocation with GFP_KERNEL is called inside the runtime
203  *     resume/suspend callback of any one of its ancestors (or the
204  *     block device itself), a deadlock may be triggered inside the
205  *     memory allocation, since it might not complete until the block
206  *     device becomes active and the involved page I/O finishes. This
207  *     situation was first pointed out by Alan Stern. Network devices
208  *     are involved in iSCSI-type situations.
209  *
210  * The dev_hotplug_mutex lock is held in this function to handle the
211  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
212  * from an async probe().
213  *
214  * The function should be called between device_add() and device_del()
215  * on the affected device (block/network device).
216  */
217 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
218 {
219         static DEFINE_MUTEX(dev_hotplug_mutex);
220
221         mutex_lock(&dev_hotplug_mutex);
222         for (;;) {
223                 bool enabled;
224
225                 /* Hold the power lock, since the bitfield is not SMP-safe. */
226                 spin_lock_irq(&dev->power.lock);
227                 enabled = dev->power.memalloc_noio;
228                 dev->power.memalloc_noio = enable;
229                 spin_unlock_irq(&dev->power.lock);
230
231                 /*
232                  * No need to enable the ancestors any more if the
233                  * device has already been enabled.
234                  */
235                 if (enabled && enable)
236                         break;
237
238                 dev = dev->parent;
239
240                 /*
241                  * Clear the flag of the parent device only if none of
242                  * its children has the flag set, because an ancestor's
243                  * flag may have been set by any one of its descendants.
244                  */
245                 if (!dev || (!enable &&
246                              device_for_each_child(dev, NULL,
247                                                    dev_memalloc_noio)))
248                         break;
249         }
250         mutex_unlock(&dev_hotplug_mutex);
251 }
252 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
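
/*
 * Usage sketch (illustrative): a block or network device driver sets the
 * flag after device_add() and clears it again around device_del():
 *
 *	error = device_add(dev);
 *	if (error)
 *		return error;
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */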
253
254 /**
255  * rpm_check_suspend_allowed - Test whether a device may be suspended.
256  * @dev: Device to test.
257  */
258 static int rpm_check_suspend_allowed(struct device *dev)
259 {
260         int retval = 0;
261
262         if (dev->power.runtime_error)
263                 retval = -EINVAL;
264         else if (dev->power.disable_depth > 0)
265                 retval = -EACCES;
266         else if (atomic_read(&dev->power.usage_count) > 0)
267                 retval = -EAGAIN;
268         else if (!dev->power.ignore_children &&
269                         atomic_read(&dev->power.child_count))
270                 retval = -EBUSY;
271
272         /* Pending resume requests take precedence over suspends. */
273         else if ((dev->power.deferred_resume
274                         && dev->power.runtime_status == RPM_SUSPENDING)
275             || (dev->power.request_pending
276                         && dev->power.request == RPM_REQ_RESUME))
277                 retval = -EAGAIN;
278         else if (__dev_pm_qos_resume_latency(dev) == 0)
279                 retval = -EPERM;
280         else if (dev->power.runtime_status == RPM_SUSPENDED)
281                 retval = 1;
282
283         return retval;
284 }
285
286 static int rpm_get_suppliers(struct device *dev)
287 {
288         struct device_link *link;
289
290         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
291                                 device_links_read_lock_held()) {
292                 int retval;
293
294                 if (!(link->flags & DL_FLAG_PM_RUNTIME))
295                         continue;
296
297                 retval = pm_runtime_get_sync(link->supplier);
298                 /* Ignore suppliers with disabled runtime PM. */
299                 if (retval < 0 && retval != -EACCES) {
300                         pm_runtime_put_noidle(link->supplier);
301                         return retval;
302                 }
303                 refcount_inc(&link->rpm_active);
304         }
305         return 0;
306 }
307
308 static void rpm_put_suppliers(struct device *dev)
309 {
310         struct device_link *link;
311
312         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
313                                 device_links_read_lock_held()) {
314
315                 while (refcount_dec_not_one(&link->rpm_active))
316                         pm_runtime_put(link->supplier);
317         }
318 }
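
/*
 * Background sketch (illustrative): the supplier bookkeeping above only
 * applies to device links created with DL_FLAG_PM_RUNTIME, for example:
 *
 *	link = device_link_add(consumer, supplier,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *
 * With such a link in place, resuming the consumer takes a runtime PM
 * reference on the supplier via rpm_get_suppliers(), which is dropped
 * again by rpm_put_suppliers() when the consumer suspends.
 */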
319
320 /**
321  * __rpm_callback - Run a given runtime PM callback for a given device.
322  * @cb: Runtime PM callback to run.
323  * @dev: Device to run the callback for.
324  */
325 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
326         __releases(&dev->power.lock) __acquires(&dev->power.lock)
327 {
328         int retval, idx;
329         bool use_links = dev->power.links_count > 0;
330
331         if (dev->power.irq_safe) {
332                 spin_unlock(&dev->power.lock);
333         } else {
334                 spin_unlock_irq(&dev->power.lock);
335
336                 /*
337                  * Resume suppliers if necessary.
338                  *
339                  * The device's runtime PM status cannot change until this
340                  * routine returns, so it is safe to read the status outside of
341                  * the lock.
342                  */
343                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
344                         idx = device_links_read_lock();
345
346                         retval = rpm_get_suppliers(dev);
347                         if (retval)
348                                 goto fail;
349
350                         device_links_read_unlock(idx);
351                 }
352         }
353
354         retval = cb(dev);
355
356         if (dev->power.irq_safe) {
357                 spin_lock(&dev->power.lock);
358         } else {
359                 /*
360                  * If the device is suspending and the callback has returned
361                  * success, drop the usage counters of the suppliers that have
362                  * been reference counted on its resume.
363                  *
364                  * Do that if resume fails too.
365                  */
366                 if (use_links
367                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
368                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
369                         idx = device_links_read_lock();
370
371  fail:
372                         rpm_put_suppliers(dev);
373
374                         device_links_read_unlock(idx);
375                 }
376
377                 spin_lock_irq(&dev->power.lock);
378         }
379
380         return retval;
381 }
382
383 /**
384  * rpm_idle - Notify device bus type if the device can be suspended.
385  * @dev: Device to notify the bus type about.
386  * @rpmflags: Flag bits.
387  *
388  * Check if the device's runtime PM status allows it to be suspended.  If
389  * another idle notification has been started earlier, return immediately.  If
390  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
391  * run the ->runtime_idle() callback directly. If the ->runtime_idle() callback
392  * doesn't exist or if it returns 0, call rpm_suspend() with the RPM_AUTO flag.
393  *
394  * This function must be called under dev->power.lock with interrupts disabled.
395  */
396 static int rpm_idle(struct device *dev, int rpmflags)
397 {
398         int (*callback)(struct device *);
399         int retval;
400
401         trace_rpm_idle_rcuidle(dev, rpmflags);
402         retval = rpm_check_suspend_allowed(dev);
403         if (retval < 0)
404                 ;       /* Conditions are wrong. */
405
406         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
407         else if (dev->power.runtime_status != RPM_ACTIVE)
408                 retval = -EAGAIN;
409
410         /*
411          * Any pending request other than an idle notification takes
412          * precedence over us, except that the timer may be running.
413          */
414         else if (dev->power.request_pending &&
415             dev->power.request > RPM_REQ_IDLE)
416                 retval = -EAGAIN;
417
418         /* Act as though RPM_NOWAIT is always set. */
419         else if (dev->power.idle_notification)
420                 retval = -EINPROGRESS;
421         if (retval)
422                 goto out;
423
424         /* Pending requests need to be canceled. */
425         dev->power.request = RPM_REQ_NONE;
426
427         if (dev->power.no_callbacks)
428                 goto out;
429
430         /* Carry out an asynchronous or a synchronous idle notification. */
431         if (rpmflags & RPM_ASYNC) {
432                 dev->power.request = RPM_REQ_IDLE;
433                 if (!dev->power.request_pending) {
434                         dev->power.request_pending = true;
435                         queue_work(pm_wq, &dev->power.work);
436                 }
437                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
438                 return 0;
439         }
440
441         dev->power.idle_notification = true;
442
443         callback = RPM_GET_CALLBACK(dev, runtime_idle);
444
445         if (callback)
446                 retval = __rpm_callback(callback, dev);
447
448         dev->power.idle_notification = false;
449         wake_up_all(&dev->power.wait_queue);
450
451  out:
452         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
453         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
454 }
455
456 /**
457  * rpm_callback - Run a given runtime PM callback for a given device.
458  * @cb: Runtime PM callback to run.
459  * @dev: Device to run the callback for.
460  */
461 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
462 {
463         int retval;
464
465         if (!cb)
466                 return -ENOSYS;
467
468         if (dev->power.memalloc_noio) {
469                 unsigned int noio_flag;
470
471                 /*
472                  * Deadlock might be caused if memory allocation with
473                  * GFP_KERNEL happens inside runtime_suspend and
474                  * runtime_resume callbacks of one block device's
475                  * ancestor or the block device itself. Network
476                  * device might be thought as part of iSCSI block
477                  * device, so network device and its ancestor should
478                  * be marked as memalloc_noio too.
479                  */
480                 noio_flag = memalloc_noio_save();
481                 retval = __rpm_callback(cb, dev);
482                 memalloc_noio_restore(noio_flag);
483         } else {
484                 retval = __rpm_callback(cb, dev);
485         }
486
487         dev->power.runtime_error = retval;
488         return retval != -EACCES ? retval : -EIO;
489 }
490
491 /**
492  * rpm_suspend - Carry out runtime suspend of given device.
493  * @dev: Device to suspend.
494  * @rpmflags: Flag bits.
495  *
496  * Check if the device's runtime PM status allows it to be suspended.
497  * Cancel a pending idle notification, autosuspend or suspend. If
498  * another suspend has been started earlier, either return immediately
499  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
500  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
501  * otherwise run the ->runtime_suspend() callback directly. If
502  * ->runtime_suspend() succeeds and a deferred resume was requested while
503  * the callback was running, carry it out; otherwise send an idle
504  * notification to the device's parent (provided the suspend succeeded and
505  * both parent->power.ignore_children and dev->power.irq_safe are unset).
506  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
507  * flag is set and the next autosuspend-delay expiration time is in the
508  * future, schedule another autosuspend attempt.
509  *
510  * This function must be called under dev->power.lock with interrupts disabled.
511  */
512 static int rpm_suspend(struct device *dev, int rpmflags)
513         __releases(&dev->power.lock) __acquires(&dev->power.lock)
514 {
515         int (*callback)(struct device *);
516         struct device *parent = NULL;
517         int retval;
518
519         trace_rpm_suspend_rcuidle(dev, rpmflags);
520
521  repeat:
522         retval = rpm_check_suspend_allowed(dev);
523         if (retval < 0)
524                 goto out;       /* Conditions are wrong. */
525
526         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
527         if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
528                 retval = -EAGAIN;
529         if (retval)
530                 goto out;
531
532         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
533         if ((rpmflags & RPM_AUTO)
534             && dev->power.runtime_status != RPM_SUSPENDING) {
535                 u64 expires = pm_runtime_autosuspend_expiration(dev);
536
537                 if (expires != 0) {
538                         /* Pending requests need to be canceled. */
539                         dev->power.request = RPM_REQ_NONE;
540
541                         /*
542                          * Optimization: If the timer is already running and is
543                          * set to expire at or before the autosuspend delay,
544                          * avoid the overhead of resetting it.  Just let it
545                          * expire; pm_suspend_timer_fn() will take care of the
546                          * rest.
547                          */
548                         if (!(dev->power.timer_expires &&
549                                         dev->power.timer_expires <= expires)) {
550                                 /*
551                                  * We add a slack of 25% to gather wakeups
552                                  * without sacrificing the granularity.
553                                  */
554                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
555                                                     (NSEC_PER_MSEC >> 2);
556
557                                 dev->power.timer_expires = expires;
558                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
559                                                 ns_to_ktime(expires),
560                                                 slack,
561                                                 HRTIMER_MODE_ABS);
562                         }
563                         dev->power.timer_autosuspends = 1;
564                         goto out;
565                 }
566         }
567
568         /* Other scheduled or pending requests need to be canceled. */
569         pm_runtime_cancel_pending(dev);
570
571         if (dev->power.runtime_status == RPM_SUSPENDING) {
572                 DEFINE_WAIT(wait);
573
574                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
575                         retval = -EINPROGRESS;
576                         goto out;
577                 }
578
579                 if (dev->power.irq_safe) {
580                         spin_unlock(&dev->power.lock);
581
582                         cpu_relax();
583
584                         spin_lock(&dev->power.lock);
585                         goto repeat;
586                 }
587
588                 /* Wait for the other suspend running in parallel with us. */
589                 for (;;) {
590                         prepare_to_wait(&dev->power.wait_queue, &wait,
591                                         TASK_UNINTERRUPTIBLE);
592                         if (dev->power.runtime_status != RPM_SUSPENDING)
593                                 break;
594
595                         spin_unlock_irq(&dev->power.lock);
596
597                         schedule();
598
599                         spin_lock_irq(&dev->power.lock);
600                 }
601                 finish_wait(&dev->power.wait_queue, &wait);
602                 goto repeat;
603         }
604
605         if (dev->power.no_callbacks)
606                 goto no_callback;       /* Assume success. */
607
608         /* Carry out an asynchronous or a synchronous suspend. */
609         if (rpmflags & RPM_ASYNC) {
610                 dev->power.request = (rpmflags & RPM_AUTO) ?
611                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
612                 if (!dev->power.request_pending) {
613                         dev->power.request_pending = true;
614                         queue_work(pm_wq, &dev->power.work);
615                 }
616                 goto out;
617         }
618
619         __update_runtime_status(dev, RPM_SUSPENDING);
620
621         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
622
623         dev_pm_enable_wake_irq_check(dev, true);
624         retval = rpm_callback(callback, dev);
625         if (retval)
626                 goto fail;
627
628  no_callback:
629         __update_runtime_status(dev, RPM_SUSPENDED);
630         pm_runtime_deactivate_timer(dev);
631
632         if (dev->parent) {
633                 parent = dev->parent;
634                 atomic_add_unless(&parent->power.child_count, -1, 0);
635         }
636         wake_up_all(&dev->power.wait_queue);
637
638         if (dev->power.deferred_resume) {
639                 dev->power.deferred_resume = false;
640                 rpm_resume(dev, 0);
641                 retval = -EAGAIN;
642                 goto out;
643         }
644
645         /* Maybe the parent is now able to suspend. */
646         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
647                 spin_unlock(&dev->power.lock);
648
649                 spin_lock(&parent->power.lock);
650                 rpm_idle(parent, RPM_ASYNC);
651                 spin_unlock(&parent->power.lock);
652
653                 spin_lock(&dev->power.lock);
654         }
655
656  out:
657         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
658
659         return retval;
660
661  fail:
662         dev_pm_disable_wake_irq_check(dev);
663         __update_runtime_status(dev, RPM_ACTIVE);
664         dev->power.deferred_resume = false;
665         wake_up_all(&dev->power.wait_queue);
666
667         if (retval == -EAGAIN || retval == -EBUSY) {
668                 dev->power.runtime_error = 0;
669
670                 /*
671                  * If the callback routine failed an autosuspend, and
672                  * if the last_busy time has been updated so that there
673                  * is a new autosuspend expiration time, automatically
674                  * reschedule another autosuspend.
675                  */
676                 if ((rpmflags & RPM_AUTO) &&
677                     pm_runtime_autosuspend_expiration(dev) != 0)
678                         goto repeat;
679         } else {
680                 pm_runtime_cancel_pending(dev);
681         }
682         goto out;
683 }
684
685 /**
686  * rpm_resume - Carry out runtime resume of given device.
687  * @dev: Device to resume.
688  * @rpmflags: Flag bits.
689  *
690  * Check if the device's runtime PM status allows it to be resumed.  Cancel
691  * any scheduled or pending requests.  If another resume has been started
692  * earlier, either return immediately or wait for it to finish, depending on the
693  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
694  * parallel with this function, either tell the other process to resume after
695  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
696  * flag is set then queue a resume request; otherwise run the
697  * ->runtime_resume() callback directly.  Queue an idle notification for the
698  * device if the resume succeeded.
699  *
700  * This function must be called under dev->power.lock with interrupts disabled.
701  */
702 static int rpm_resume(struct device *dev, int rpmflags)
703         __releases(&dev->power.lock) __acquires(&dev->power.lock)
704 {
705         int (*callback)(struct device *);
706         struct device *parent = NULL;
707         int retval = 0;
708
709         trace_rpm_resume_rcuidle(dev, rpmflags);
710
711  repeat:
712         if (dev->power.runtime_error)
713                 retval = -EINVAL;
714         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
715             && dev->power.runtime_status == RPM_ACTIVE)
716                 retval = 1;
717         else if (dev->power.disable_depth > 0)
718                 retval = -EACCES;
719         if (retval)
720                 goto out;
721
722         /*
723          * Other scheduled or pending requests need to be canceled.  Small
724          * optimization: If an autosuspend timer is running, leave it running
725          * rather than cancelling it now only to restart it again in the near
726          * future.
727          */
728         dev->power.request = RPM_REQ_NONE;
729         if (!dev->power.timer_autosuspends)
730                 pm_runtime_deactivate_timer(dev);
731
732         if (dev->power.runtime_status == RPM_ACTIVE) {
733                 retval = 1;
734                 goto out;
735         }
736
737         if (dev->power.runtime_status == RPM_RESUMING
738             || dev->power.runtime_status == RPM_SUSPENDING) {
739                 DEFINE_WAIT(wait);
740
741                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
742                         if (dev->power.runtime_status == RPM_SUSPENDING)
743                                 dev->power.deferred_resume = true;
744                         else
745                                 retval = -EINPROGRESS;
746                         goto out;
747                 }
748
749                 if (dev->power.irq_safe) {
750                         spin_unlock(&dev->power.lock);
751
752                         cpu_relax();
753
754                         spin_lock(&dev->power.lock);
755                         goto repeat;
756                 }
757
758                 /* Wait for the operation carried out in parallel with us. */
759                 for (;;) {
760                         prepare_to_wait(&dev->power.wait_queue, &wait,
761                                         TASK_UNINTERRUPTIBLE);
762                         if (dev->power.runtime_status != RPM_RESUMING
763                             && dev->power.runtime_status != RPM_SUSPENDING)
764                                 break;
765
766                         spin_unlock_irq(&dev->power.lock);
767
768                         schedule();
769
770                         spin_lock_irq(&dev->power.lock);
771                 }
772                 finish_wait(&dev->power.wait_queue, &wait);
773                 goto repeat;
774         }
775
776         /*
777          * See if we can skip waking up the parent.  This is safe only if
778          * power.no_callbacks is set, because otherwise we don't know whether
779          * the resume will actually succeed.
780          */
781         if (dev->power.no_callbacks && !parent && dev->parent) {
782                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
783                 if (dev->parent->power.disable_depth > 0
784                     || dev->parent->power.ignore_children
785                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
786                         atomic_inc(&dev->parent->power.child_count);
787                         spin_unlock(&dev->parent->power.lock);
788                         retval = 1;
789                         goto no_callback;       /* Assume success. */
790                 }
791                 spin_unlock(&dev->parent->power.lock);
792         }
793
794         /* Carry out an asynchronous or a synchronous resume. */
795         if (rpmflags & RPM_ASYNC) {
796                 dev->power.request = RPM_REQ_RESUME;
797                 if (!dev->power.request_pending) {
798                         dev->power.request_pending = true;
799                         queue_work(pm_wq, &dev->power.work);
800                 }
801                 retval = 0;
802                 goto out;
803         }
804
805         if (!parent && dev->parent) {
806                 /*
807                  * Increment the parent's usage counter and resume it if
808                  * necessary.  Not needed if dev is irq-safe; then the
809                  * parent is permanently resumed.
810                  */
811                 parent = dev->parent;
812                 if (dev->power.irq_safe)
813                         goto skip_parent;
814                 spin_unlock(&dev->power.lock);
815
816                 pm_runtime_get_noresume(parent);
817
818                 spin_lock(&parent->power.lock);
819                 /*
820                  * Resume the parent if it has runtime PM enabled and not been
821                  * set to ignore its children.
822                  */
823                 if (!parent->power.disable_depth
824                     && !parent->power.ignore_children) {
825                         rpm_resume(parent, 0);
826                         if (parent->power.runtime_status != RPM_ACTIVE)
827                                 retval = -EBUSY;
828                 }
829                 spin_unlock(&parent->power.lock);
830
831                 spin_lock(&dev->power.lock);
832                 if (retval)
833                         goto out;
834                 goto repeat;
835         }
836  skip_parent:
837
838         if (dev->power.no_callbacks)
839                 goto no_callback;       /* Assume success. */
840
841         __update_runtime_status(dev, RPM_RESUMING);
842
843         callback = RPM_GET_CALLBACK(dev, runtime_resume);
844
845         dev_pm_disable_wake_irq_check(dev);
846         retval = rpm_callback(callback, dev);
847         if (retval) {
848                 __update_runtime_status(dev, RPM_SUSPENDED);
849                 pm_runtime_cancel_pending(dev);
850                 dev_pm_enable_wake_irq_check(dev, false);
851         } else {
852  no_callback:
853                 __update_runtime_status(dev, RPM_ACTIVE);
854                 pm_runtime_mark_last_busy(dev);
855                 if (parent)
856                         atomic_inc(&parent->power.child_count);
857         }
858         wake_up_all(&dev->power.wait_queue);
859
860         if (retval >= 0)
861                 rpm_idle(dev, RPM_ASYNC);
862
863  out:
864         if (parent && !dev->power.irq_safe) {
865                 spin_unlock_irq(&dev->power.lock);
866
867                 pm_runtime_put(parent);
868
869                 spin_lock_irq(&dev->power.lock);
870         }
871
872         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
873
874         return retval;
875 }
876
877 /**
878  * pm_runtime_work - Universal runtime PM work function.
879  * @work: Work structure used for scheduling the execution of this function.
880  *
881  * Use @work to get the device object the work is to be done for, determine what
882  * is to be done and execute the appropriate runtime PM function.
883  */
884 static void pm_runtime_work(struct work_struct *work)
885 {
886         struct device *dev = container_of(work, struct device, power.work);
887         enum rpm_request req;
888
889         spin_lock_irq(&dev->power.lock);
890
891         if (!dev->power.request_pending)
892                 goto out;
893
894         req = dev->power.request;
895         dev->power.request = RPM_REQ_NONE;
896         dev->power.request_pending = false;
897
898         switch (req) {
899         case RPM_REQ_NONE:
900                 break;
901         case RPM_REQ_IDLE:
902                 rpm_idle(dev, RPM_NOWAIT);
903                 break;
904         case RPM_REQ_SUSPEND:
905                 rpm_suspend(dev, RPM_NOWAIT);
906                 break;
907         case RPM_REQ_AUTOSUSPEND:
908                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
909                 break;
910         case RPM_REQ_RESUME:
911                 rpm_resume(dev, RPM_NOWAIT);
912                 break;
913         }
914
915  out:
916         spin_unlock_irq(&dev->power.lock);
917 }
918
919 /**
920  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
921  * @timer: hrtimer used by pm_schedule_suspend().
922  *
923  * Check if the time is right and queue a suspend request.
924  */
925 static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
926 {
927         struct device *dev = container_of(timer, struct device, power.suspend_timer);
928         unsigned long flags;
929         u64 expires;
930
931         spin_lock_irqsave(&dev->power.lock, flags);
932
933         expires = dev->power.timer_expires;
934         /*
935          * If 'expires' is after the current time, we've been called
936          * too early.
937          */
938         if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
939                 dev->power.timer_expires = 0;
940                 rpm_suspend(dev, dev->power.timer_autosuspends ?
941                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
942         }
943
944         spin_unlock_irqrestore(&dev->power.lock, flags);
945
946         return HRTIMER_NORESTART;
947 }
948
949 /**
950  * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
951  * @dev: Device to suspend.
952  * @delay: Time to wait before submitting a suspend request, in milliseconds.
953  */
954 int pm_schedule_suspend(struct device *dev, unsigned int delay)
955 {
956         unsigned long flags;
957         u64 expires;
958         int retval;
959
960         spin_lock_irqsave(&dev->power.lock, flags);
961
962         if (!delay) {
963                 retval = rpm_suspend(dev, RPM_ASYNC);
964                 goto out;
965         }
966
967         retval = rpm_check_suspend_allowed(dev);
968         if (retval)
969                 goto out;
970
971         /* Other scheduled or pending requests need to be canceled. */
972         pm_runtime_cancel_pending(dev);
973
974         expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
975         dev->power.timer_expires = expires;
976         dev->power.timer_autosuspends = 0;
977         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
978
979  out:
980         spin_unlock_irqrestore(&dev->power.lock, flags);
981
982         return retval;
983 }
984 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
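
/*
 * Usage sketch (illustrative): request a suspend attempt half a second
 * from now; a @delay of zero queues an asynchronous suspend request
 * immediately instead:
 *
 *	error = pm_schedule_suspend(dev, 500);	// delay in milliseconds
 *	if (error < 0)
 *		dev_dbg(dev, "suspend not allowed: %d\n", error);
 */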
985
986 /**
987  * __pm_runtime_idle - Entry point for runtime idle operations.
988  * @dev: Device to send idle notification for.
989  * @rpmflags: Flag bits.
990  *
991  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
992  * return immediately if it is larger than zero.  Then carry out an idle
993  * notification, either synchronous or asynchronous.
994  *
995  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
996  * or if pm_runtime_irq_safe() has been called.
997  */
998 int __pm_runtime_idle(struct device *dev, int rpmflags)
999 {
1000         unsigned long flags;
1001         int retval;
1002
1003         if (rpmflags & RPM_GET_PUT) {
1004                 if (!atomic_dec_and_test(&dev->power.usage_count)) {
1005                         trace_rpm_usage_rcuidle(dev, rpmflags);
1006                         return 0;
1007                 }
1008         }
1009
1010         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1011
1012         spin_lock_irqsave(&dev->power.lock, flags);
1013         retval = rpm_idle(dev, rpmflags);
1014         spin_unlock_irqrestore(&dev->power.lock, flags);
1015
1016         return retval;
1017 }
1018 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1019
1020 /**
1021  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1022  * @dev: Device to suspend.
1023  * @rpmflags: Flag bits.
1024  *
1025  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1026  * return immediately if it is larger than zero.  Then carry out a suspend,
1027  * either synchronous or asynchronous.
1028  *
1029  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1030  * or if pm_runtime_irq_safe() has been called.
1031  */
1032 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1033 {
1034         unsigned long flags;
1035         int retval;
1036
1037         if (rpmflags & RPM_GET_PUT) {
1038                 if (!atomic_dec_and_test(&dev->power.usage_count)) {
1039                         trace_rpm_usage_rcuidle(dev, rpmflags);
1040                         return 0;
1041                 }
1042         }
1043
1044         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1045
1046         spin_lock_irqsave(&dev->power.lock, flags);
1047         retval = rpm_suspend(dev, rpmflags);
1048         spin_unlock_irqrestore(&dev->power.lock, flags);
1049
1050         return retval;
1051 }
1052 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1053
1054 /**
1055  * __pm_runtime_resume - Entry point for runtime resume operations.
1056  * @dev: Device to resume.
1057  * @rpmflags: Flag bits.
1058  *
1059  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1060  * carry out a resume, either synchronous or asynchronous.
1061  *
1062  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1063  * or if pm_runtime_irq_safe() has been called.
1064  */
1065 int __pm_runtime_resume(struct device *dev, int rpmflags)
1066 {
1067         unsigned long flags;
1068         int retval;
1069
1070         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1071                         dev->power.runtime_status != RPM_ACTIVE);
1072
1073         if (rpmflags & RPM_GET_PUT)
1074                 atomic_inc(&dev->power.usage_count);
1075
1076         spin_lock_irqsave(&dev->power.lock, flags);
1077         retval = rpm_resume(dev, rpmflags);
1078         spin_unlock_irqrestore(&dev->power.lock, flags);
1079
1080         return retval;
1081 }
1082 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
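
/*
 * Illustrative note: drivers normally reach the three entry points above
 * through static inline helpers in include/linux/pm_runtime.h, e.g.:
 *
 *	pm_runtime_get_sync(dev);	// __pm_runtime_resume(dev, RPM_GET_PUT)
 *	pm_runtime_put(dev);		// __pm_runtime_idle(dev,
 *					//	RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_autosuspend(dev);// __pm_runtime_suspend(dev,
 *					//	RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */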
1083
1084 /**
1085  * pm_runtime_get_if_active - Conditionally bump up device usage counter.
1086  * @dev: Device to handle.
1087  * @ign_usage_count: Whether or not to look at the current usage counter value.
1088  *
1089  * Return -EINVAL if runtime PM is disabled for @dev.
1090  *
1091  * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1092  * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1093  * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1094  * without changing the usage counter.
1095  *
1096  * If @ign_usage_count is %true, this function can be used to prevent suspending
1097  * the device when its runtime PM status is %RPM_ACTIVE.
1098  *
1099  * If @ign_usage_count is %false, this function can be used to prevent
1100  * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1101  * runtime PM usage counter is not zero.
1102  *
1103  * The caller is responsible for decrementing the runtime PM usage counter of
1104  * @dev after this function has returned a positive value for it.
1105  */
1106 int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1107 {
1108         unsigned long flags;
1109         int retval;
1110
1111         spin_lock_irqsave(&dev->power.lock, flags);
1112         if (dev->power.disable_depth > 0) {
1113                 retval = -EINVAL;
1114         } else if (dev->power.runtime_status != RPM_ACTIVE) {
1115                 retval = 0;
1116         } else if (ign_usage_count) {
1117                 retval = 1;
1118                 atomic_inc(&dev->power.usage_count);
1119         } else {
1120                 retval = atomic_inc_not_zero(&dev->power.usage_count);
1121         }
1122         trace_rpm_usage_rcuidle(dev, 0);
1123         spin_unlock_irqrestore(&dev->power.lock, flags);
1124
1125         return retval;
1126 }
1127 EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
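
/*
 * Usage sketch (illustrative): take a usage reference only if the device
 * is currently active, e.g. to do optional housekeeping without waking
 * the device up:
 *
 *	if (pm_runtime_get_if_active(dev, true) > 0) {
 *		... access the device ...
 *		pm_runtime_put(dev);
 *	}
 */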
1128
1129 /**
1130  * __pm_runtime_set_status - Set runtime PM status of a device.
1131  * @dev: Device to handle.
1132  * @status: New runtime PM status of the device.
1133  *
1134  * If runtime PM of the device is disabled or its power.runtime_error field is
1135  * different from zero, the status may be changed either to RPM_ACTIVE or to
1136  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1137  * However, if the device has a parent and the parent is not active, and the
1138  * parent's power.ignore_children flag is unset, the device's status cannot be
1139  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1140  *
1141  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1142  * and the device parent's counter of unsuspended children is modified to
1143  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1144  * notification request for the parent is submitted.
1145  *
1146  * If @dev has any suppliers (as reflected by device links to them), and @status
1147  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1148  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1149  * of the @status value) and the suppliers will be deactivated on exit.  The
1150  * error returned by the failing supplier activation will be returned in that
1151  * case.
1152  */
1153 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1154 {
1155         struct device *parent = dev->parent;
1156         bool notify_parent = false;
1157         int error = 0;
1158
1159         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1160                 return -EINVAL;
1161
1162         spin_lock_irq(&dev->power.lock);
1163
1164         /*
1165          * Prevent PM-runtime from being enabled for the device or return an
1166          * error if it is enabled already and working.
1167          */
1168         if (dev->power.runtime_error || dev->power.disable_depth)
1169                 dev->power.disable_depth++;
1170         else
1171                 error = -EAGAIN;
1172
1173         spin_unlock_irq(&dev->power.lock);
1174
1175         if (error)
1176                 return error;
1177
1178         /*
1179          * If the new status is RPM_ACTIVE, the suppliers can be activated
1180          * upfront regardless of the current status, because next time
1181          * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1182          * involved will be dropped down to one anyway.
1183          */
1184         if (status == RPM_ACTIVE) {
1185                 int idx = device_links_read_lock();
1186
1187                 error = rpm_get_suppliers(dev);
1188                 if (error)
1189                         status = RPM_SUSPENDED;
1190
1191                 device_links_read_unlock(idx);
1192         }
1193
1194         spin_lock_irq(&dev->power.lock);
1195
1196         if (dev->power.runtime_status == status || !parent)
1197                 goto out_set;
1198
1199         if (status == RPM_SUSPENDED) {
1200                 atomic_add_unless(&parent->power.child_count, -1, 0);
1201                 notify_parent = !parent->power.ignore_children;
1202         } else {
1203                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1204
1205                 /*
1206                  * It is invalid to put an active child under a parent that is
1207                  * not active, has runtime PM enabled and the
1208                  * 'power.ignore_children' flag unset.
1209                  */
1210                 if (!parent->power.disable_depth
1211                     && !parent->power.ignore_children
1212                     && parent->power.runtime_status != RPM_ACTIVE) {
1213                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1214                                 dev_name(dev),
1215                                 dev_name(parent));
1216                         error = -EBUSY;
1217                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1218                         atomic_inc(&parent->power.child_count);
1219                 }
1220
1221                 spin_unlock(&parent->power.lock);
1222
1223                 if (error) {
1224                         status = RPM_SUSPENDED;
1225                         goto out;
1226                 }
1227         }
1228
1229  out_set:
1230         __update_runtime_status(dev, status);
1231         if (!error)
1232                 dev->power.runtime_error = 0;
1233
1234  out:
1235         spin_unlock_irq(&dev->power.lock);
1236
1237         if (notify_parent)
1238                 pm_request_idle(parent);
1239
1240         if (status == RPM_SUSPENDED) {
1241                 int idx = device_links_read_lock();
1242
1243                 rpm_put_suppliers(dev);
1244
1245                 device_links_read_unlock(idx);
1246         }
1247
1248         pm_runtime_enable(dev);
1249
1250         return error;
1251 }
1252 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
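
/*
 * Usage sketch (illustrative): a driver that powers its device up by hand
 * during probe typically synchronizes the runtime PM status before
 * enabling runtime PM; pm_runtime_set_active() is an inline wrapper
 * around the function above ("foo_power_on" is a hypothetical helper):
 *
 *	foo_power_on(dev);
 *	pm_runtime_set_active(dev);	// status becomes RPM_ACTIVE
 *	pm_runtime_enable(dev);
 */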
1253
1254 /**
1255  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1256  * @dev: Device to handle.
1257  *
1258  * Flush all pending requests for the device from pm_wq and wait for all
1259  * runtime PM operations involving the device in progress to complete.
1260  *
1261  * Should be called under dev->power.lock with interrupts disabled.
1262  */
1263 static void __pm_runtime_barrier(struct device *dev)
1264 {
1265         pm_runtime_deactivate_timer(dev);
1266
1267         if (dev->power.request_pending) {
1268                 dev->power.request = RPM_REQ_NONE;
1269                 spin_unlock_irq(&dev->power.lock);
1270
1271                 cancel_work_sync(&dev->power.work);
1272
1273                 spin_lock_irq(&dev->power.lock);
1274                 dev->power.request_pending = false;
1275         }
1276
1277         if (dev->power.runtime_status == RPM_SUSPENDING
1278             || dev->power.runtime_status == RPM_RESUMING
1279             || dev->power.idle_notification) {
1280                 DEFINE_WAIT(wait);
1281
1282                 /* Suspend, wake-up or idle notification in progress. */
1283                 for (;;) {
1284                         prepare_to_wait(&dev->power.wait_queue, &wait,
1285                                         TASK_UNINTERRUPTIBLE);
1286                         if (dev->power.runtime_status != RPM_SUSPENDING
1287                             && dev->power.runtime_status != RPM_RESUMING
1288                             && !dev->power.idle_notification)
1289                                 break;
1290                         spin_unlock_irq(&dev->power.lock);
1291
1292                         schedule();
1293
1294                         spin_lock_irq(&dev->power.lock);
1295                 }
1296                 finish_wait(&dev->power.wait_queue, &wait);
1297         }
1298 }
1299
1300 /**
1301  * pm_runtime_barrier - Flush pending requests and wait for completions.
1302  * @dev: Device to handle.
1303  *
1304  * Prevent the device from being suspended by incrementing its usage counter and,
1305  * if there's a pending resume request for the device, wake the device up.
1306  * Next, make sure that all pending requests for the device have been flushed
1307  * from pm_wq and wait for all runtime PM operations involving the device in
1308  * progress to complete.
1309  *
1310  * Return value:
1311  * 1, if there was a resume request pending and the device had to be woken up,
1312  * 0, otherwise
1313  */
1314 int pm_runtime_barrier(struct device *dev)
1315 {
1316         int retval = 0;
1317
1318         pm_runtime_get_noresume(dev);
1319         spin_lock_irq(&dev->power.lock);
1320
1321         if (dev->power.request_pending
1322             && dev->power.request == RPM_REQ_RESUME) {
1323                 rpm_resume(dev, 0);
1324                 retval = 1;
1325         }
1326
1327         __pm_runtime_barrier(dev);
1328
1329         spin_unlock_irq(&dev->power.lock);
1330         pm_runtime_put_noidle(dev);
1331
1332         return retval;
1333 }
1334 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1335
1336 /**
1337  * __pm_runtime_disable - Disable runtime PM of a device.
1338  * @dev: Device to handle.
1339  * @check_resume: If set, check if there's a resume request for the device.
1340  *
1341  * Increment power.disable_depth for the device and if it was zero previously,
1342  * cancel all pending runtime PM requests for the device and wait for all
1343  * operations in progress to complete.  The device can be either active or
1344  * suspended after its runtime PM has been disabled.
1345  *
1346  * If @check_resume is set and there's a resume request pending when
1347  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1348  * function will wake up the device before disabling its runtime PM.
1349  */
1350 void __pm_runtime_disable(struct device *dev, bool check_resume)
1351 {
1352         spin_lock_irq(&dev->power.lock);
1353
1354         if (dev->power.disable_depth > 0) {
1355                 dev->power.disable_depth++;
1356                 goto out;
1357         }
1358
1359         /*
1360          * Wake up the device if there's a resume request pending, because that
1361          * means there probably is some I/O to process and disabling runtime PM
1362          * shouldn't prevent the device from processing the I/O.
1363          */
1364         if (check_resume && dev->power.request_pending
1365             && dev->power.request == RPM_REQ_RESUME) {
1366                 /*
1367                  * Prevent suspends and idle notifications from being carried
1368                  * out after we have woken up the device.
1369                  */
1370                 pm_runtime_get_noresume(dev);
1371
1372                 rpm_resume(dev, 0);
1373
1374                 pm_runtime_put_noidle(dev);
1375         }
1376
1377         /* Update time accounting before disabling PM-runtime. */
1378         update_pm_runtime_accounting(dev);
1379
1380         if (!dev->power.disable_depth++)
1381                 __pm_runtime_barrier(dev);
1382
1383  out:
1384         spin_unlock_irq(&dev->power.lock);
1385 }
1386 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1387
1388 /**
1389  * pm_runtime_enable - Enable runtime PM of a device.
1390  * @dev: Device to handle.
1391  */
1392 void pm_runtime_enable(struct device *dev)
1393 {
1394         unsigned long flags;
1395
1396         spin_lock_irqsave(&dev->power.lock, flags);
1397
1398         if (dev->power.disable_depth > 0) {
1399                 dev->power.disable_depth--;
1400
1401                 /* About to enable runtime PM, set accounting_timestamp to now. */
1402                 if (!dev->power.disable_depth)
1403                         dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1404         } else {
1405                 dev_warn(dev, "Unbalanced %s!\n", __func__);
1406         }
1407
1408         WARN(!dev->power.disable_depth &&
1409              dev->power.runtime_status == RPM_SUSPENDED &&
1410              !dev->power.ignore_children &&
1411              atomic_read(&dev->power.child_count) > 0,
1412              "Enabling runtime PM for inactive device (%s) with active children\n",
1413              dev_name(dev));
1414
1415         spin_unlock_irqrestore(&dev->power.lock, flags);
1416 }
1417 EXPORT_SYMBOL_GPL(pm_runtime_enable);
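
/*
 * Usage sketch (illustrative): enable/disable operations nest via
 * power.disable_depth, so every pm_runtime_enable() must be balanced by a
 * matching pm_runtime_disable() (an inline wrapper around
 * __pm_runtime_disable(dev, true)), typically in probe and remove:
 *
 *	// probe:
 *	pm_runtime_enable(dev);
 *	// remove:
 *	pm_runtime_disable(dev);
 */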
1418
1419 /**
1420  * pm_runtime_forbid - Block runtime PM of a device.
1421  * @dev: Device to handle.
1422  *
1423  * Increase the device's usage count and clear its power.runtime_auto flag,
1424  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1425  * for it.
1426  */
1427 void pm_runtime_forbid(struct device *dev)
1428 {
1429         spin_lock_irq(&dev->power.lock);
1430         if (!dev->power.runtime_auto)
1431                 goto out;
1432
1433         dev->power.runtime_auto = false;
1434         atomic_inc(&dev->power.usage_count);
1435         rpm_resume(dev, 0);
1436
1437  out:
1438         spin_unlock_irq(&dev->power.lock);
1439 }
1440 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1441
1442 /**
1443  * pm_runtime_allow - Unblock runtime PM of a device.
1444  * @dev: Device to handle.
1445  *
1446  * Decrease the device's usage count and set its power.runtime_auto flag.
1447  */
1448 void pm_runtime_allow(struct device *dev)
1449 {
1450         spin_lock_irq(&dev->power.lock);
1451         if (dev->power.runtime_auto)
1452                 goto out;
1453
1454         dev->power.runtime_auto = true;
1455         if (atomic_dec_and_test(&dev->power.usage_count))
1456                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1457         else
1458                 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
1459
1460  out:
1461         spin_unlock_irq(&dev->power.lock);
1462 }
1463 EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
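
/*
 * Illustrative sketch: a logical sub-device whose power state simply follows
 * its parent (the "child" pointer below is hypothetical) may be set up as:
 *
 *      pm_runtime_no_callbacks(child);
 *      pm_runtime_enable(child);
 */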

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
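
/*
 * Illustrative sketch: once pm_runtime_irq_safe() has been called for a
 * device, its synchronous runtime PM helpers may be used in atomic context,
 * for example from a hypothetical interrupt handler:
 *
 *      static irqreturn_t foo_irq(int irq, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              pm_runtime_get_sync(dev);
 *              ... handle the event ...
 *              pm_runtime_put(dev);
 *              return IRQ_HANDLED;
 *      }
 *
 * This relies on the ->runtime_suspend() and ->runtime_resume() callbacks
 * themselves being safe to run with interrupts disabled.
 */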

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                } else {
                        trace_rpm_usage_rcuidle(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
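
/*
 * Illustrative sketch of the usual autosuspend setup and use
 * (pm_runtime_use_autosuspend() is the inline wrapper that calls
 * __pm_runtime_use_autosuspend(dev, true)):
 *
 *      In probe(), e.g. to suspend the device 2 seconds after its last use:
 *
 *              pm_runtime_set_autosuspend_delay(dev, 2000);
 *              pm_runtime_use_autosuspend(dev);
 *              pm_runtime_enable(dev);
 *
 *      In the I/O path:
 *
 *              pm_runtime_get_sync(dev);
 *              ... carry out the I/O ...
 *              pm_runtime_mark_last_busy(dev);
 *              pm_runtime_put_autosuspend(dev);
 */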

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        /*
         * Runtime PM of new devices starts out disabled; subsystem or driver
         * code is expected to call pm_runtime_enable() when ready.
         */
        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        dev->power.suspend_timer.function = pm_suspend_timer_fn;

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
        if (!pm_runtime_enabled(dev)) {
                if (dev->power.runtime_status == RPM_ACTIVE)
                        pm_runtime_set_suspended(dev);
                if (dev->power.irq_safe) {
                        spin_lock_irq(&dev->power.lock);
                        dev->power.irq_safe = 0;
                        spin_unlock_irq(&dev->power.lock);
                        if (dev->parent)
                                pm_runtime_put(dev->parent);
                }
        }
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);
        pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                if (link->flags & DL_FLAG_PM_RUNTIME) {
                        link->supplier_preactivated = true;
                        refcount_inc(&link->rpm_active);
                        pm_runtime_get_sync(link->supplier);
                }

        device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                if (link->supplier_preactivated) {
                        link->supplier_preactivated = false;
                        if (refcount_dec_not_one(&link->rpm_active))
                                pm_runtime_put(link->supplier);
                }

        device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.links_count++;
        spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        WARN_ON(dev->power.links_count == 0);
        dev->power.links_count--;
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * runtime PM references to it held via @link on behalf of the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
        if (!(link->flags & DL_FLAG_PM_RUNTIME))
                return;

        pm_runtime_drop_link_count(link->consumer);

        /*
         * rpm_active is biased by one, so this loop releases every runtime PM
         * reference taken on the supplier on behalf of the consumer.
         */
        while (refcount_dec_not_one(&link->rpm_active))
                pm_runtime_put(link->supplier);
}
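
/*
 * Illustrative sketch: device links with runtime PM handling are created by
 * passing DL_FLAG_PM_RUNTIME to device_link_add(), optionally together with
 * DL_FLAG_RPM_ACTIVE if the supplier should be resumed and a runtime PM
 * reference taken on it right away:
 *
 *      link = device_link_add(consumer_dev, supplier_dev,
 *                             DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *
 * While such a link exists, runtime-resuming the consumer resumes the
 * supplier first, and the link's rpm_active refcount tracks the runtime PM
 * references held on the supplier on behalf of the consumer.
 */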

static bool pm_runtime_need_not_resume(struct device *dev)
{
        /*
         * The usage counter is compared with 1 rather than 0, because the PM
         * core itself holds a runtime PM reference on every device during
         * system-wide transitions (see device_prepare()).
         */
        return atomic_read(&dev->power.usage_count) <= 1 &&
                (atomic_read(&dev->power.child_count) == 0 ||
                 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so the device's runtime PM status can be checked safely
 * and, if it is active, invoke its ->runtime_suspend callback to suspend it
 * and change its runtime PM status field to RPM_SUSPENDED.  Also, if the
 * device's usage and children counters don't indicate that the device was in
 * use before the system-wide transition under way, decrement its parent's
 * children counter (if there is a parent).  Keep runtime PM disabled to
 * preserve the state unless errors are encountered.
 *
 * Typically this function is invoked from a system suspend callback to make
 * sure the device is put into a low-power state, and it should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
        int (*callback)(struct device *);
        int ret;

        pm_runtime_disable(dev);
        if (pm_runtime_status_suspended(dev))
                return 0;

        callback = RPM_GET_CALLBACK(dev, runtime_suspend);

        ret = callback ? callback(dev) : 0;
        if (ret)
                goto err;

        /*
         * If the device can stay in suspend after the system-wide transition
         * to the working state that will follow, drop the children counter of
         * its parent, but set its status to RPM_SUSPENDED anyway in case this
         * function will be called again for it in the meantime.
         */
        if (pm_runtime_need_not_resume(dev))
                pm_runtime_set_suspended(dev);
        else
                __update_runtime_status(dev, RPM_SUSPENDED);

        return 0;

err:
        pm_runtime_enable(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function, the caller is expected to have brought the
 * device into a low-power state by a call to pm_runtime_force_suspend().  Here
 * those actions are reversed: the device is brought back to full power if it
 * is expected to be used on system resume.  Otherwise, the resume is deferred
 * to be managed via runtime PM.
 *
 * Typically this function is invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
        int (*callback)(struct device *);
        int ret = 0;

        if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
                goto out;

        /*
         * The value of the parent's children counter is correct already, so
         * just update the status of the device.
         */
        __update_runtime_status(dev, RPM_ACTIVE);

        callback = RPM_GET_CALLBACK(dev, runtime_resume);

        ret = callback ? callback(dev) : 0;
        if (ret) {
                pm_runtime_set_suspended(dev);
                goto out;
        }

        pm_runtime_mark_last_busy(dev);
out:
        pm_runtime_enable(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
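
/*
 * Illustrative sketch: drivers without special system-wide PM needs often
 * reuse the two helpers above directly as their system sleep callbacks (the
 * "foo" names are hypothetical):
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *                                      pm_runtime_force_resume)
 *              SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *                                 foo_runtime_resume, NULL)
 *      };
 */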