1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/runtime.c - Helper functions for device runtime PM
4  *
5  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
7  */
8 #include <linux/sched/mm.h>
9 #include <linux/ktime.h>
10 #include <linux/hrtimer.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <trace/events/rpm.h>
15
16 #include "../base.h"
17 #include "power.h"
18
19 typedef int (*pm_callback_t)(struct device *);
20
21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
22 {
23         pm_callback_t cb;
24         const struct dev_pm_ops *ops;
25
26         if (dev->pm_domain)
27                 ops = &dev->pm_domain->ops;
28         else if (dev->type && dev->type->pm)
29                 ops = dev->type->pm;
30         else if (dev->class && dev->class->pm)
31                 ops = dev->class->pm;
32         else if (dev->bus && dev->bus->pm)
33                 ops = dev->bus->pm;
34         else
35                 ops = NULL;
36
37         if (ops)
38                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
39         else
40                 cb = NULL;
41
42         if (!cb && dev->driver && dev->driver->pm)
43                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
44
45         return cb;
46 }
47
48 #define RPM_GET_CALLBACK(dev, callback) \
49                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
50
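/*
 * Illustrative note (not taken from the original source): RPM_GET_CALLBACK()
 * resolves callbacks in the order PM domain -> device type -> class -> bus,
 * falling back to the driver's own dev_pm_ops only if none of those layers
 * provides one.  For instance, for a device that belongs to a PM domain:
 *
 *	// Returns dev->pm_domain->ops.runtime_suspend when the domain sets it,
 *	// even if the driver also provides its own ->runtime_suspend().
 *	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 */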
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
53
54 /**
55  * update_pm_runtime_accounting - Update the time accounting of power states
56  * @dev: Device to update the accounting for
57  *
58  * In order to be able to have time accounting of the various power states
59  * (as used by programs such as PowerTOP to show the effectiveness of runtime
60  * PM), we need to track the time spent in each state.
61  * update_pm_runtime_accounting must be called each time before the
62  * runtime_status field is updated, to account the time in the old state
63  * correctly.
64  */
65 static void update_pm_runtime_accounting(struct device *dev)
66 {
67         u64 now, last, delta;
68
69         if (dev->power.disable_depth > 0)
70                 return;
71
72         last = dev->power.accounting_timestamp;
73
74         now = ktime_get_mono_fast_ns();
75         dev->power.accounting_timestamp = now;
76
77         /*
78          * Because ktime_get_mono_fast_ns() is not monotonic during
79          * timekeeping updates, ensure that 'now' is after the last saved
80          * timestamp.
81          */
82         if (now < last)
83                 return;
84
85         delta = now - last;
86
87         if (dev->power.runtime_status == RPM_SUSPENDED)
88                 dev->power.suspended_time += delta;
89         else
90                 dev->power.active_time += delta;
91 }
92
93 static void __update_runtime_status(struct device *dev, enum rpm_status status)
94 {
95         update_pm_runtime_accounting(dev);
96         dev->power.runtime_status = status;
97 }
98
99 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
100 {
101         u64 time;
102         unsigned long flags;
103
104         spin_lock_irqsave(&dev->power.lock, flags);
105
106         update_pm_runtime_accounting(dev);
107         time = suspended ? dev->power.suspended_time : dev->power.active_time;
108
109         spin_unlock_irqrestore(&dev->power.lock, flags);
110
111         return time;
112 }
113
114 u64 pm_runtime_active_time(struct device *dev)
115 {
116         return rpm_get_accounted_time(dev, false);
117 }
118
119 u64 pm_runtime_suspended_time(struct device *dev)
120 {
121         return rpm_get_accounted_time(dev, true);
122 }
123 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
124
125 /**
126  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
127  * @dev: Device to handle.
128  */
129 static void pm_runtime_deactivate_timer(struct device *dev)
130 {
131         if (dev->power.timer_expires > 0) {
132                 hrtimer_try_to_cancel(&dev->power.suspend_timer);
133                 dev->power.timer_expires = 0;
134         }
135 }
136
137 /**
138  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
139  * @dev: Device to handle.
140  */
141 static void pm_runtime_cancel_pending(struct device *dev)
142 {
143         pm_runtime_deactivate_timer(dev);
144         /*
145          * In case there's a request pending, make sure its work function will
146          * return without doing anything.
147          */
148         dev->power.request = RPM_REQ_NONE;
149 }
150
151 /*
152  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
153  * @dev: Device to handle.
154  *
155  * Compute the autosuspend-delay expiration time based on the device's
156  * power.last_busy time.  If the delay has already expired or is disabled
157  * (negative) or the power.use_autosuspend flag isn't set, return 0.
158  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
159  *
160  * This function may be called either with or without dev->power.lock held.
161  * Either way it can be racy, since power.last_busy may be updated at any time.
162  */
163 u64 pm_runtime_autosuspend_expiration(struct device *dev)
164 {
165         int autosuspend_delay;
166         u64 expires;
167
168         if (!dev->power.use_autosuspend)
169                 return 0;
170
171         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
172         if (autosuspend_delay < 0)
173                 return 0;
174
175         expires  = READ_ONCE(dev->power.last_busy);
176         expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
177         if (expires > ktime_get_mono_fast_ns())
178                 return expires; /* Expires in the future */
179
180         return 0;
181 }
182 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
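/*
 * Illustrative sketch (not taken from the original source): how a driver
 * would typically arm the autosuspend machinery that this expiration check
 * relies on.  The 2000 ms delay below is a made-up example value.
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	...
 *	pm_runtime_mark_last_busy(dev);		// updates power.last_busy
 *	pm_runtime_put_autosuspend(dev);
 *
 * With the above, pm_runtime_autosuspend_expiration() returns roughly
 * power.last_busy + 2000 ms (converted to ns) until that time has passed,
 * and 0 afterwards.
 */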
183
184 static int dev_memalloc_noio(struct device *dev, void *data)
185 {
186         return dev->power.memalloc_noio;
187 }
188
189 /*
190  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
191  * @dev: Device to handle.
192  * @enable: True for setting the flag and False for clearing the flag.
193  *
194  * Set the flag for all devices in the path from the device to the
195  * root device in the device tree if @enable is true, otherwise clear
196  * the flag for devices in the path whose siblings don't set the flag.
197  *
198  * The function should only be called by block device or network
199  * device drivers to solve the deadlock problem during runtime
200  * resume/suspend:
201  *
202  *     If memory allocation with GFP_KERNEL is called inside the runtime
203  *     resume/suspend callback of any one of the device's ancestors (or the
204  *     block device itself), a deadlock may be triggered inside the
205  *     memory allocation, since it might not complete until the block
206  *     device becomes active and the involved page I/O finishes.  This
207  *     situation was first pointed out by Alan Stern.  Network devices
208  *     are involved in iSCSI-like situations.
209  *
210  * The dev_hotplug_mutex lock is held in the function to handle the
211  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
212  * from async probe().
213  *
214  * The function should be called between device_add() and device_del()
215  * on the affected device (block/network device).
216  */
217 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
218 {
219         static DEFINE_MUTEX(dev_hotplug_mutex);
220
221         mutex_lock(&dev_hotplug_mutex);
222         for (;;) {
223                 bool enabled;
224
225                 /* hold power lock since bitfield is not SMP-safe. */
226                 spin_lock_irq(&dev->power.lock);
227                 enabled = dev->power.memalloc_noio;
228                 dev->power.memalloc_noio = enable;
229                 spin_unlock_irq(&dev->power.lock);
230
231                 /*
232                  * No need to enable the ancestors any more if the flag has
233                  * already been set for this device.
234                  */
235                 if (enabled && enable)
236                         break;
237
238                 dev = dev->parent;
239
240                 /*
241                  * Clear the flag of the parent device only if none of its
242                  * children has the flag set, because an ancestor's flag may
243                  * have been set by any one of its descendants.
244                  */
245                 if (!dev || (!enable &&
246                              device_for_each_child(dev, NULL,
247                                                    dev_memalloc_noio)))
248                         break;
249         }
250         mutex_unlock(&dev_hotplug_mutex);
251 }
252 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
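/*
 * Illustrative sketch (not taken from the original source): a block or
 * network device driver is expected to bracket the flag between
 * device_add() and device_del().  The sdev pointer below is a made-up
 * placeholder.
 *
 *	error = device_add(&sdev->dev);
 *	if (error)
 *		return error;
 *	pm_runtime_set_memalloc_noio(&sdev->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&sdev->dev, false);
 *	device_del(&sdev->dev);
 */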
253
254 /**
255  * rpm_check_suspend_allowed - Test whether a device may be suspended.
256  * @dev: Device to test.
257  */
258 static int rpm_check_suspend_allowed(struct device *dev)
259 {
260         int retval = 0;
261
262         if (dev->power.runtime_error)
263                 retval = -EINVAL;
264         else if (dev->power.disable_depth > 0)
265                 retval = -EACCES;
266         else if (atomic_read(&dev->power.usage_count) > 0)
267                 retval = -EAGAIN;
268         else if (!dev->power.ignore_children &&
269                         atomic_read(&dev->power.child_count))
270                 retval = -EBUSY;
271
272         /* Pending resume requests take precedence over suspends. */
273         else if ((dev->power.deferred_resume
274                         && dev->power.runtime_status == RPM_SUSPENDING)
275             || (dev->power.request_pending
276                         && dev->power.request == RPM_REQ_RESUME))
277                 retval = -EAGAIN;
278         else if (__dev_pm_qos_resume_latency(dev) == 0)
279                 retval = -EPERM;
280         else if (dev->power.runtime_status == RPM_SUSPENDED)
281                 retval = 1;
282
283         return retval;
284 }
285
286 static int rpm_get_suppliers(struct device *dev)
287 {
288         struct device_link *link;
289
290         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
291                                 device_links_read_lock_held()) {
292                 int retval;
293
294                 if (!(link->flags & DL_FLAG_PM_RUNTIME))
295                         continue;
296
297                 retval = pm_runtime_get_sync(link->supplier);
298                 /* Ignore suppliers with disabled runtime PM. */
299                 if (retval < 0 && retval != -EACCES) {
300                         pm_runtime_put_noidle(link->supplier);
301                         return retval;
302                 }
303                 refcount_inc(&link->rpm_active);
304         }
305         return 0;
306 }
307
308 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
309 {
310         struct device_link *link;
311
312         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
313                                 device_links_read_lock_held()) {
314
315                 while (refcount_dec_not_one(&link->rpm_active))
316                         pm_runtime_put_noidle(link->supplier);
317
318                 if (try_to_suspend)
319                         pm_request_idle(link->supplier);
320         }
321 }
322
323 static void rpm_put_suppliers(struct device *dev)
324 {
325         __rpm_put_suppliers(dev, true);
326 }
327
328 static void rpm_suspend_suppliers(struct device *dev)
329 {
330         struct device_link *link;
331         int idx = device_links_read_lock();
332
333         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
334                                 device_links_read_lock_held())
335                 pm_request_idle(link->supplier);
336
337         device_links_read_unlock(idx);
338 }
339
340 /**
341  * __rpm_callback - Run a given runtime PM callback for a given device.
342  * @cb: Runtime PM callback to run.
343  * @dev: Device to run the callback for.
344  */
345 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
346         __releases(&dev->power.lock) __acquires(&dev->power.lock)
347 {
348         int retval, idx;
349         bool use_links = dev->power.links_count > 0;
350
351         if (dev->power.irq_safe) {
352                 spin_unlock(&dev->power.lock);
353         } else {
354                 spin_unlock_irq(&dev->power.lock);
355
356                 /*
357                  * Resume suppliers if necessary.
358                  *
359                  * The device's runtime PM status cannot change until this
360                  * routine returns, so it is safe to read the status outside of
361                  * the lock.
362                  */
363                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
364                         idx = device_links_read_lock();
365
366                         retval = rpm_get_suppliers(dev);
367                         if (retval) {
368                                 rpm_put_suppliers(dev);
369                                 goto fail;
370                         }
371
372                         device_links_read_unlock(idx);
373                 }
374         }
375
376         retval = cb(dev);
377
378         if (dev->power.irq_safe) {
379                 spin_lock(&dev->power.lock);
380         } else {
381                 /*
382                  * If the device is suspending and the callback has returned
383                  * success, drop the usage counters of the suppliers that have
384                  * been reference counted on its resume.
385                  *
386                  * Do that if resume fails too.
387                  */
388                 if (use_links
389                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
390                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
391                         idx = device_links_read_lock();
392
393                         __rpm_put_suppliers(dev, false);
394
395 fail:
396                         device_links_read_unlock(idx);
397                 }
398
399                 spin_lock_irq(&dev->power.lock);
400         }
401
402         return retval;
403 }
404
405 /**
406  * rpm_idle - Notify device bus type if the device can be suspended.
407  * @dev: Device to notify the bus type about.
408  * @rpmflags: Flag bits.
409  *
410  * Check if the device's runtime PM status allows it to be suspended.  If
411  * another idle notification has been started earlier, return immediately.  If
412  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
413  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
414  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
415  *
416  * This function must be called under dev->power.lock with interrupts disabled.
417  */
418 static int rpm_idle(struct device *dev, int rpmflags)
419 {
420         int (*callback)(struct device *);
421         int retval;
422
423         trace_rpm_idle_rcuidle(dev, rpmflags);
424         retval = rpm_check_suspend_allowed(dev);
425         if (retval < 0)
426                 ;       /* Conditions are wrong. */
427
428         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
429         else if (dev->power.runtime_status != RPM_ACTIVE)
430                 retval = -EAGAIN;
431
432         /*
433          * Any pending request other than an idle notification takes
434          * precedence over us, except that the timer may be running.
435          */
436         else if (dev->power.request_pending &&
437             dev->power.request > RPM_REQ_IDLE)
438                 retval = -EAGAIN;
439
440         /* Act as though RPM_NOWAIT is always set. */
441         else if (dev->power.idle_notification)
442                 retval = -EINPROGRESS;
443         if (retval)
444                 goto out;
445
446         /* Pending requests need to be canceled. */
447         dev->power.request = RPM_REQ_NONE;
448
449         if (dev->power.no_callbacks)
450                 goto out;
451
452         /* Carry out an asynchronous or a synchronous idle notification. */
453         if (rpmflags & RPM_ASYNC) {
454                 dev->power.request = RPM_REQ_IDLE;
455                 if (!dev->power.request_pending) {
456                         dev->power.request_pending = true;
457                         queue_work(pm_wq, &dev->power.work);
458                 }
459                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
460                 return 0;
461         }
462
463         dev->power.idle_notification = true;
464
465         callback = RPM_GET_CALLBACK(dev, runtime_idle);
466
467         if (callback)
468                 retval = __rpm_callback(callback, dev);
469
470         dev->power.idle_notification = false;
471         wake_up_all(&dev->power.wait_queue);
472
473  out:
474         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
475         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
476 }
477
478 /**
479  * rpm_callback - Run a given runtime PM callback for a given device.
480  * @cb: Runtime PM callback to run.
481  * @dev: Device to run the callback for.
482  */
483 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
484 {
485         int retval;
486
487         if (!cb)
488                 return -ENOSYS;
489
490         if (dev->power.memalloc_noio) {
491                 unsigned int noio_flag;
492
493                 /*
494                  * Deadlock might be caused if memory allocation with
495                  * GFP_KERNEL happens inside runtime_suspend and
496                  * runtime_resume callbacks of one block device's
497                  * ancestor or the block device itself. Network
498                  * device might be thought as part of iSCSI block
499                  * device, so network device and its ancestor should
500                  * be marked as memalloc_noio too.
501                  */
502                 noio_flag = memalloc_noio_save();
503                 retval = __rpm_callback(cb, dev);
504                 memalloc_noio_restore(noio_flag);
505         } else {
506                 retval = __rpm_callback(cb, dev);
507         }
508
509         dev->power.runtime_error = retval;
510         return retval != -EACCES ? retval : -EIO;
511 }
512
513 /**
514  * rpm_suspend - Carry out runtime suspend of given device.
515  * @dev: Device to suspend.
516  * @rpmflags: Flag bits.
517  *
518  * Check if the device's runtime PM status allows it to be suspended.
519  * Cancel a pending idle notification, autosuspend or suspend. If
520  * another suspend has been started earlier, either return immediately
521  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
522  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
523  * otherwise run the ->runtime_suspend() callback directly.  If
524  * ->runtime_suspend() succeeds and a deferred resume was requested while
525  * the callback was running, carry it out; otherwise send an idle
526  * notification for the device's parent (if the suspend succeeded and both
527  * ignore_children of parent->power and irq_safe of dev->power are unset).
528  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
529  * flag is set and the next autosuspend-delay expiration time is in the
530  * future, schedule another autosuspend attempt.
531  *
532  * This function must be called under dev->power.lock with interrupts disabled.
533  */
534 static int rpm_suspend(struct device *dev, int rpmflags)
535         __releases(&dev->power.lock) __acquires(&dev->power.lock)
536 {
537         int (*callback)(struct device *);
538         struct device *parent = NULL;
539         int retval;
540
541         trace_rpm_suspend_rcuidle(dev, rpmflags);
542
543  repeat:
544         retval = rpm_check_suspend_allowed(dev);
545         if (retval < 0)
546                 goto out;       /* Conditions are wrong. */
547
548         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
549         if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
550                 retval = -EAGAIN;
551         if (retval)
552                 goto out;
553
554         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
555         if ((rpmflags & RPM_AUTO)
556             && dev->power.runtime_status != RPM_SUSPENDING) {
557                 u64 expires = pm_runtime_autosuspend_expiration(dev);
558
559                 if (expires != 0) {
560                         /* Pending requests need to be canceled. */
561                         dev->power.request = RPM_REQ_NONE;
562
563                         /*
564                          * Optimization: If the timer is already running and is
565                          * set to expire at or before the autosuspend delay,
566                          * avoid the overhead of resetting it.  Just let it
567                          * expire; pm_suspend_timer_fn() will take care of the
568                          * rest.
569                          */
570                         if (!(dev->power.timer_expires &&
571                                         dev->power.timer_expires <= expires)) {
572                                 /*
573                                  * We add a slack of 25% to gather wakeups
574                                  * without sacrificing the granularity.
575                                  */
576                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
577                                                     (NSEC_PER_MSEC >> 2);
578
579                                 dev->power.timer_expires = expires;
580                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
581                                                 ns_to_ktime(expires),
582                                                 slack,
583                                                 HRTIMER_MODE_ABS);
584                         }
585                         dev->power.timer_autosuspends = 1;
586                         goto out;
587                 }
588         }
589
590         /* Other scheduled or pending requests need to be canceled. */
591         pm_runtime_cancel_pending(dev);
592
593         if (dev->power.runtime_status == RPM_SUSPENDING) {
594                 DEFINE_WAIT(wait);
595
596                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
597                         retval = -EINPROGRESS;
598                         goto out;
599                 }
600
601                 if (dev->power.irq_safe) {
602                         spin_unlock(&dev->power.lock);
603
604                         cpu_relax();
605
606                         spin_lock(&dev->power.lock);
607                         goto repeat;
608                 }
609
610                 /* Wait for the other suspend running in parallel with us. */
611                 for (;;) {
612                         prepare_to_wait(&dev->power.wait_queue, &wait,
613                                         TASK_UNINTERRUPTIBLE);
614                         if (dev->power.runtime_status != RPM_SUSPENDING)
615                                 break;
616
617                         spin_unlock_irq(&dev->power.lock);
618
619                         schedule();
620
621                         spin_lock_irq(&dev->power.lock);
622                 }
623                 finish_wait(&dev->power.wait_queue, &wait);
624                 goto repeat;
625         }
626
627         if (dev->power.no_callbacks)
628                 goto no_callback;       /* Assume success. */
629
630         /* Carry out an asynchronous or a synchronous suspend. */
631         if (rpmflags & RPM_ASYNC) {
632                 dev->power.request = (rpmflags & RPM_AUTO) ?
633                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
634                 if (!dev->power.request_pending) {
635                         dev->power.request_pending = true;
636                         queue_work(pm_wq, &dev->power.work);
637                 }
638                 goto out;
639         }
640
641         __update_runtime_status(dev, RPM_SUSPENDING);
642
643         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
644
645         dev_pm_enable_wake_irq_check(dev, true);
646         retval = rpm_callback(callback, dev);
647         if (retval)
648                 goto fail;
649
650  no_callback:
651         __update_runtime_status(dev, RPM_SUSPENDED);
652         pm_runtime_deactivate_timer(dev);
653
654         if (dev->parent) {
655                 parent = dev->parent;
656                 atomic_add_unless(&parent->power.child_count, -1, 0);
657         }
658         wake_up_all(&dev->power.wait_queue);
659
660         if (dev->power.deferred_resume) {
661                 dev->power.deferred_resume = false;
662                 rpm_resume(dev, 0);
663                 retval = -EAGAIN;
664                 goto out;
665         }
666
667         if (dev->power.irq_safe)
668                 goto out;
669
670         /* Maybe the parent is now able to suspend. */
671         if (parent && !parent->power.ignore_children) {
672                 spin_unlock(&dev->power.lock);
673
674                 spin_lock(&parent->power.lock);
675                 rpm_idle(parent, RPM_ASYNC);
676                 spin_unlock(&parent->power.lock);
677
678                 spin_lock(&dev->power.lock);
679         }
680         /* Maybe the suppliers are now able to suspend. */
681         if (dev->power.links_count > 0) {
682                 spin_unlock_irq(&dev->power.lock);
683
684                 rpm_suspend_suppliers(dev);
685
686                 spin_lock_irq(&dev->power.lock);
687         }
688
689  out:
690         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
691
692         return retval;
693
694  fail:
695         dev_pm_disable_wake_irq_check(dev);
696         __update_runtime_status(dev, RPM_ACTIVE);
697         dev->power.deferred_resume = false;
698         wake_up_all(&dev->power.wait_queue);
699
700         if (retval == -EAGAIN || retval == -EBUSY) {
701                 dev->power.runtime_error = 0;
702
703                 /*
704                  * If the callback routine failed an autosuspend, and
705                  * if the last_busy time has been updated so that there
706                  * is a new autosuspend expiration time, automatically
707                  * reschedule another autosuspend.
708                  */
709                 if ((rpmflags & RPM_AUTO) &&
710                     pm_runtime_autosuspend_expiration(dev) != 0)
711                         goto repeat;
712         } else {
713                 pm_runtime_cancel_pending(dev);
714         }
715         goto out;
716 }
717
718 /**
719  * rpm_resume - Carry out runtime resume of given device.
720  * @dev: Device to resume.
721  * @rpmflags: Flag bits.
722  *
723  * Check if the device's runtime PM status allows it to be resumed.  Cancel
724  * any scheduled or pending requests.  If another resume has been started
725  * earlier, either return immediately or wait for it to finish, depending on the
726  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
727  * parallel with this function, either tell the other process to resume after
728  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
729  * flag is set then queue a resume request; otherwise run the
730  * ->runtime_resume() callback directly.  Queue an idle notification for the
731  * device if the resume succeeded.
732  *
733  * This function must be called under dev->power.lock with interrupts disabled.
734  */
735 static int rpm_resume(struct device *dev, int rpmflags)
736         __releases(&dev->power.lock) __acquires(&dev->power.lock)
737 {
738         int (*callback)(struct device *);
739         struct device *parent = NULL;
740         int retval = 0;
741
742         trace_rpm_resume_rcuidle(dev, rpmflags);
743
744  repeat:
745         if (dev->power.runtime_error)
746                 retval = -EINVAL;
747         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
748             && dev->power.runtime_status == RPM_ACTIVE)
749                 retval = 1;
750         else if (dev->power.disable_depth > 0)
751                 retval = -EACCES;
752         if (retval)
753                 goto out;
754
755         /*
756          * Other scheduled or pending requests need to be canceled.  Small
757          * optimization: If an autosuspend timer is running, leave it running
758          * rather than cancelling it now only to restart it again in the near
759          * future.
760          */
761         dev->power.request = RPM_REQ_NONE;
762         if (!dev->power.timer_autosuspends)
763                 pm_runtime_deactivate_timer(dev);
764
765         if (dev->power.runtime_status == RPM_ACTIVE) {
766                 retval = 1;
767                 goto out;
768         }
769
770         if (dev->power.runtime_status == RPM_RESUMING
771             || dev->power.runtime_status == RPM_SUSPENDING) {
772                 DEFINE_WAIT(wait);
773
774                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
775                         if (dev->power.runtime_status == RPM_SUSPENDING)
776                                 dev->power.deferred_resume = true;
777                         else
778                                 retval = -EINPROGRESS;
779                         goto out;
780                 }
781
782                 if (dev->power.irq_safe) {
783                         spin_unlock(&dev->power.lock);
784
785                         cpu_relax();
786
787                         spin_lock(&dev->power.lock);
788                         goto repeat;
789                 }
790
791                 /* Wait for the operation carried out in parallel with us. */
792                 for (;;) {
793                         prepare_to_wait(&dev->power.wait_queue, &wait,
794                                         TASK_UNINTERRUPTIBLE);
795                         if (dev->power.runtime_status != RPM_RESUMING
796                             && dev->power.runtime_status != RPM_SUSPENDING)
797                                 break;
798
799                         spin_unlock_irq(&dev->power.lock);
800
801                         schedule();
802
803                         spin_lock_irq(&dev->power.lock);
804                 }
805                 finish_wait(&dev->power.wait_queue, &wait);
806                 goto repeat;
807         }
808
809         /*
810          * See if we can skip waking up the parent.  This is safe only if
811          * power.no_callbacks is set, because otherwise we don't know whether
812          * the resume will actually succeed.
813          */
814         if (dev->power.no_callbacks && !parent && dev->parent) {
815                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
816                 if (dev->parent->power.disable_depth > 0
817                     || dev->parent->power.ignore_children
818                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
819                         atomic_inc(&dev->parent->power.child_count);
820                         spin_unlock(&dev->parent->power.lock);
821                         retval = 1;
822                         goto no_callback;       /* Assume success. */
823                 }
824                 spin_unlock(&dev->parent->power.lock);
825         }
826
827         /* Carry out an asynchronous or a synchronous resume. */
828         if (rpmflags & RPM_ASYNC) {
829                 dev->power.request = RPM_REQ_RESUME;
830                 if (!dev->power.request_pending) {
831                         dev->power.request_pending = true;
832                         queue_work(pm_wq, &dev->power.work);
833                 }
834                 retval = 0;
835                 goto out;
836         }
837
838         if (!parent && dev->parent) {
839                 /*
840                  * Increment the parent's usage counter and resume it if
841                  * necessary.  Not needed if dev is irq-safe; then the
842                  * parent is permanently resumed.
843                  */
844                 parent = dev->parent;
845                 if (dev->power.irq_safe)
846                         goto skip_parent;
847                 spin_unlock(&dev->power.lock);
848
849                 pm_runtime_get_noresume(parent);
850
851                 spin_lock(&parent->power.lock);
852                 /*
853                  * Resume the parent if it has runtime PM enabled and not been
854                  * set to ignore its children.
855                  */
856                 if (!parent->power.disable_depth
857                     && !parent->power.ignore_children) {
858                         rpm_resume(parent, 0);
859                         if (parent->power.runtime_status != RPM_ACTIVE)
860                                 retval = -EBUSY;
861                 }
862                 spin_unlock(&parent->power.lock);
863
864                 spin_lock(&dev->power.lock);
865                 if (retval)
866                         goto out;
867                 goto repeat;
868         }
869  skip_parent:
870
871         if (dev->power.no_callbacks)
872                 goto no_callback;       /* Assume success. */
873
874         __update_runtime_status(dev, RPM_RESUMING);
875
876         callback = RPM_GET_CALLBACK(dev, runtime_resume);
877
878         dev_pm_disable_wake_irq_check(dev);
879         retval = rpm_callback(callback, dev);
880         if (retval) {
881                 __update_runtime_status(dev, RPM_SUSPENDED);
882                 pm_runtime_cancel_pending(dev);
883                 dev_pm_enable_wake_irq_check(dev, false);
884         } else {
885  no_callback:
886                 __update_runtime_status(dev, RPM_ACTIVE);
887                 pm_runtime_mark_last_busy(dev);
888                 if (parent)
889                         atomic_inc(&parent->power.child_count);
890         }
891         wake_up_all(&dev->power.wait_queue);
892
893         if (retval >= 0)
894                 rpm_idle(dev, RPM_ASYNC);
895
896  out:
897         if (parent && !dev->power.irq_safe) {
898                 spin_unlock_irq(&dev->power.lock);
899
900                 pm_runtime_put(parent);
901
902                 spin_lock_irq(&dev->power.lock);
903         }
904
905         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
906
907         return retval;
908 }
909
910 /**
911  * pm_runtime_work - Universal runtime PM work function.
912  * @work: Work structure used for scheduling the execution of this function.
913  *
914  * Use @work to get the device object the work is to be done for, determine what
915  * is to be done and execute the appropriate runtime PM function.
916  */
917 static void pm_runtime_work(struct work_struct *work)
918 {
919         struct device *dev = container_of(work, struct device, power.work);
920         enum rpm_request req;
921
922         spin_lock_irq(&dev->power.lock);
923
924         if (!dev->power.request_pending)
925                 goto out;
926
927         req = dev->power.request;
928         dev->power.request = RPM_REQ_NONE;
929         dev->power.request_pending = false;
930
931         switch (req) {
932         case RPM_REQ_NONE:
933                 break;
934         case RPM_REQ_IDLE:
935                 rpm_idle(dev, RPM_NOWAIT);
936                 break;
937         case RPM_REQ_SUSPEND:
938                 rpm_suspend(dev, RPM_NOWAIT);
939                 break;
940         case RPM_REQ_AUTOSUSPEND:
941                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
942                 break;
943         case RPM_REQ_RESUME:
944                 rpm_resume(dev, RPM_NOWAIT);
945                 break;
946         }
947
948  out:
949         spin_unlock_irq(&dev->power.lock);
950 }
951
952 /**
953  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
954  * @timer: hrtimer used by pm_schedule_suspend().
955  *
956  * Check if the time is right and queue a suspend request.
957  */
958 static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
959 {
960         struct device *dev = container_of(timer, struct device, power.suspend_timer);
961         unsigned long flags;
962         u64 expires;
963
964         spin_lock_irqsave(&dev->power.lock, flags);
965
966         expires = dev->power.timer_expires;
967         /*
968          * If 'expires' is after the current time, we've been called
969          * too early.
970          */
971         if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
972                 dev->power.timer_expires = 0;
973                 rpm_suspend(dev, dev->power.timer_autosuspends ?
974                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
975         }
976
977         spin_unlock_irqrestore(&dev->power.lock, flags);
978
979         return HRTIMER_NORESTART;
980 }
981
982 /**
983  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
984  * @dev: Device to suspend.
985  * @delay: Time to wait before submitting a suspend request, in milliseconds.
986  */
987 int pm_schedule_suspend(struct device *dev, unsigned int delay)
988 {
989         unsigned long flags;
990         u64 expires;
991         int retval;
992
993         spin_lock_irqsave(&dev->power.lock, flags);
994
995         if (!delay) {
996                 retval = rpm_suspend(dev, RPM_ASYNC);
997                 goto out;
998         }
999
1000         retval = rpm_check_suspend_allowed(dev);
1001         if (retval)
1002                 goto out;
1003
1004         /* Other scheduled or pending requests need to be canceled. */
1005         pm_runtime_cancel_pending(dev);
1006
1007         expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1008         dev->power.timer_expires = expires;
1009         dev->power.timer_autosuspends = 0;
1010         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1011
1012  out:
1013         spin_unlock_irqrestore(&dev->power.lock, flags);
1014
1015         return retval;
1016 }
1017 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
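/*
 * Illustrative sketch (not taken from the original source): requesting a
 * suspend 500 ms from now after finishing a burst of I/O.  The delay is a
 * made-up example value.
 *
 *	error = pm_schedule_suspend(dev, 500);
 *	if (error < 0 && error != -EAGAIN)
 *		dev_warn(dev, "failed to schedule runtime suspend: %d\n", error);
 */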
1018
1019 /**
1020  * __pm_runtime_idle - Entry point for runtime idle operations.
1021  * @dev: Device to send idle notification for.
1022  * @rpmflags: Flag bits.
1023  *
1024  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1025  * return immediately if it is larger than zero.  Then carry out an idle
1026  * notification, either synchronous or asynchronous.
1027  *
1028  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1029  * or if pm_runtime_irq_safe() has been called.
1030  */
1031 int __pm_runtime_idle(struct device *dev, int rpmflags)
1032 {
1033         unsigned long flags;
1034         int retval;
1035
1036         if (rpmflags & RPM_GET_PUT) {
1037                 if (!atomic_dec_and_test(&dev->power.usage_count)) {
1038                         trace_rpm_usage_rcuidle(dev, rpmflags);
1039                         return 0;
1040                 }
1041         }
1042
1043         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1044
1045         spin_lock_irqsave(&dev->power.lock, flags);
1046         retval = rpm_idle(dev, rpmflags);
1047         spin_unlock_irqrestore(&dev->power.lock, flags);
1048
1049         return retval;
1050 }
1051 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1052
1053 /**
1054  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1055  * @dev: Device to suspend.
1056  * @rpmflags: Flag bits.
1057  *
1058  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1059  * return immediately if it is larger than zero.  Then carry out a suspend,
1060  * either synchronous or asynchronous.
1061  *
1062  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1063  * or if pm_runtime_irq_safe() has been called.
1064  */
1065 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1066 {
1067         unsigned long flags;
1068         int retval;
1069
1070         if (rpmflags & RPM_GET_PUT) {
1071                 if (!atomic_dec_and_test(&dev->power.usage_count)) {
1072                         trace_rpm_usage_rcuidle(dev, rpmflags);
1073                         return 0;
1074                 }
1075         }
1076
1077         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1078
1079         spin_lock_irqsave(&dev->power.lock, flags);
1080         retval = rpm_suspend(dev, rpmflags);
1081         spin_unlock_irqrestore(&dev->power.lock, flags);
1082
1083         return retval;
1084 }
1085 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1086
1087 /**
1088  * __pm_runtime_resume - Entry point for runtime resume operations.
1089  * @dev: Device to resume.
1090  * @rpmflags: Flag bits.
1091  *
1092  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1093  * carry out a resume, either synchronous or asynchronous.
1094  *
1095  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1096  * or if pm_runtime_irq_safe() has been called.
1097  */
1098 int __pm_runtime_resume(struct device *dev, int rpmflags)
1099 {
1100         unsigned long flags;
1101         int retval;
1102
1103         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1104                         dev->power.runtime_status != RPM_ACTIVE);
1105
1106         if (rpmflags & RPM_GET_PUT)
1107                 atomic_inc(&dev->power.usage_count);
1108
1109         spin_lock_irqsave(&dev->power.lock, flags);
1110         retval = rpm_resume(dev, rpmflags);
1111         spin_unlock_irqrestore(&dev->power.lock, flags);
1112
1113         return retval;
1114 }
1115 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
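/*
 * Illustrative sketch (not taken from the original source): the helpers in
 * include/linux/pm_runtime.h are thin wrappers around the three entry points
 * above, so a typical driver I/O path looks roughly like this (the foo_*
 * names are made up):
 *
 *	static int foo_transfer(struct device *dev)
 *	{
 *		int error;
 *
 *		error = pm_runtime_get_sync(dev);	// __pm_runtime_resume(dev, RPM_GET_PUT)
 *		if (error < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return error;
 *		}
 *
 *		// ... access the hardware ...
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);	// __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 *		return 0;
 *	}
 */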
1116
1117 /**
1118  * pm_runtime_get_if_active - Conditionally bump up device usage counter.
1119  * @dev: Device to handle.
1120  * @ign_usage_count: Whether or not to look at the current usage counter value.
1121  *
1122  * Return -EINVAL if runtime PM is disabled for @dev.
1123  *
1124  * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1125  * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1126  * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1127  * without changing the usage counter.
1128  *
1129  * If @ign_usage_count is %true, this function can be used to prevent suspending
1130  * the device when its runtime PM status is %RPM_ACTIVE.
1131  *
1132  * If @ign_usage_count is %false, this function can be used to prevent
1133  * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1134  * runtime PM usage counter is not zero.
1135  *
1136  * The caller is responsible for decrementing the runtime PM usage counter of
1137  * @dev after this function has returned a positive value for it.
1138  */
1139 int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1140 {
1141         unsigned long flags;
1142         int retval;
1143
1144         spin_lock_irqsave(&dev->power.lock, flags);
1145         if (dev->power.disable_depth > 0) {
1146                 retval = -EINVAL;
1147         } else if (dev->power.runtime_status != RPM_ACTIVE) {
1148                 retval = 0;
1149         } else if (ign_usage_count) {
1150                 retval = 1;
1151                 atomic_inc(&dev->power.usage_count);
1152         } else {
1153                 retval = atomic_inc_not_zero(&dev->power.usage_count);
1154         }
1155         trace_rpm_usage_rcuidle(dev, 0);
1156         spin_unlock_irqrestore(&dev->power.lock, flags);
1157
1158         return retval;
1159 }
1160 EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
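/*
 * Illustrative sketch (not taken from the original source): using
 * pm_runtime_get_if_active() to touch the hardware only when it is already
 * powered up, from a context that must not wait for a resume.  The
 * foo_read_counter() helper is a made-up name.
 *
 *	if (pm_runtime_get_if_active(dev, false) > 0) {
 *		val = foo_read_counter(dev);
 *		pm_runtime_put(dev);
 *	}
 */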
1161
1162 /**
1163  * __pm_runtime_set_status - Set runtime PM status of a device.
1164  * @dev: Device to handle.
1165  * @status: New runtime PM status of the device.
1166  *
1167  * If runtime PM of the device is disabled or its power.runtime_error field is
1168  * different from zero, the status may be changed either to RPM_ACTIVE, or to
1169  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1170  * However, if the device has a parent and the parent is not active, and the
1171  * parent's power.ignore_children flag is unset, the device's status cannot be
1172  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1173  *
1174  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1175  * and the device parent's counter of unsuspended children is modified to
1176  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1177  * notification request for the parent is submitted.
1178  *
1179  * If @dev has any suppliers (as reflected by device links to them), and @status
1180  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1181  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1182  * of the @status value) and the suppliers will be deactivated on exit.  The
1183  * error returned by the failing supplier activation will be returned in that
1184  * case.
1185  */
1186 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1187 {
1188         struct device *parent = dev->parent;
1189         bool notify_parent = false;
1190         int error = 0;
1191
1192         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1193                 return -EINVAL;
1194
1195         spin_lock_irq(&dev->power.lock);
1196
1197         /*
1198          * Prevent PM-runtime from being enabled for the device or return an
1199          * error if it is enabled already and working.
1200          */
1201         if (dev->power.runtime_error || dev->power.disable_depth)
1202                 dev->power.disable_depth++;
1203         else
1204                 error = -EAGAIN;
1205
1206         spin_unlock_irq(&dev->power.lock);
1207
1208         if (error)
1209                 return error;
1210
1211         /*
1212          * If the new status is RPM_ACTIVE, the suppliers can be activated
1213          * upfront regardless of the current status, because next time
1214          * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1215          * involved will be dropped down to one anyway.
1216          */
1217         if (status == RPM_ACTIVE) {
1218                 int idx = device_links_read_lock();
1219
1220                 error = rpm_get_suppliers(dev);
1221                 if (error)
1222                         status = RPM_SUSPENDED;
1223
1224                 device_links_read_unlock(idx);
1225         }
1226
1227         spin_lock_irq(&dev->power.lock);
1228
1229         if (dev->power.runtime_status == status || !parent)
1230                 goto out_set;
1231
1232         if (status == RPM_SUSPENDED) {
1233                 atomic_add_unless(&parent->power.child_count, -1, 0);
1234                 notify_parent = !parent->power.ignore_children;
1235         } else {
1236                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1237
1238                 /*
1239                  * It is invalid to put an active child under a parent that is
1240                  * not active, has runtime PM enabled and the
1241                  * 'power.ignore_children' flag unset.
1242                  */
1243                 if (!parent->power.disable_depth
1244                     && !parent->power.ignore_children
1245                     && parent->power.runtime_status != RPM_ACTIVE) {
1246                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1247                                 dev_name(dev),
1248                                 dev_name(parent));
1249                         error = -EBUSY;
1250                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1251                         atomic_inc(&parent->power.child_count);
1252                 }
1253
1254                 spin_unlock(&parent->power.lock);
1255
1256                 if (error) {
1257                         status = RPM_SUSPENDED;
1258                         goto out;
1259                 }
1260         }
1261
1262  out_set:
1263         __update_runtime_status(dev, status);
1264         if (!error)
1265                 dev->power.runtime_error = 0;
1266
1267  out:
1268         spin_unlock_irq(&dev->power.lock);
1269
1270         if (notify_parent)
1271                 pm_request_idle(parent);
1272
1273         if (status == RPM_SUSPENDED) {
1274                 int idx = device_links_read_lock();
1275
1276                 rpm_put_suppliers(dev);
1277
1278                 device_links_read_unlock(idx);
1279         }
1280
1281         pm_runtime_enable(dev);
1282
1283         return error;
1284 }
1285 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
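/*
 * Illustrative sketch (not taken from the original source):
 * pm_runtime_set_active() and pm_runtime_set_suspended() are wrappers around
 * __pm_runtime_set_status(), typically used at probe time before runtime PM
 * is enabled:
 *
 *	// Hardware was left powered on by firmware/bus code.
 *	pm_runtime_set_active(dev);	// __pm_runtime_set_status(dev, RPM_ACTIVE)
 *	pm_runtime_enable(dev);
 */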
1286
1287 /**
1288  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1289  * @dev: Device to handle.
1290  *
1291  * Flush all pending requests for the device from pm_wq and wait for all
1292  * runtime PM operations involving the device in progress to complete.
1293  *
1294  * Should be called under dev->power.lock with interrupts disabled.
1295  */
1296 static void __pm_runtime_barrier(struct device *dev)
1297 {
1298         pm_runtime_deactivate_timer(dev);
1299
1300         if (dev->power.request_pending) {
1301                 dev->power.request = RPM_REQ_NONE;
1302                 spin_unlock_irq(&dev->power.lock);
1303
1304                 cancel_work_sync(&dev->power.work);
1305
1306                 spin_lock_irq(&dev->power.lock);
1307                 dev->power.request_pending = false;
1308         }
1309
1310         if (dev->power.runtime_status == RPM_SUSPENDING
1311             || dev->power.runtime_status == RPM_RESUMING
1312             || dev->power.idle_notification) {
1313                 DEFINE_WAIT(wait);
1314
1315                 /* Suspend, wake-up or idle notification in progress. */
1316                 for (;;) {
1317                         prepare_to_wait(&dev->power.wait_queue, &wait,
1318                                         TASK_UNINTERRUPTIBLE);
1319                         if (dev->power.runtime_status != RPM_SUSPENDING
1320                             && dev->power.runtime_status != RPM_RESUMING
1321                             && !dev->power.idle_notification)
1322                                 break;
1323                         spin_unlock_irq(&dev->power.lock);
1324
1325                         schedule();
1326
1327                         spin_lock_irq(&dev->power.lock);
1328                 }
1329                 finish_wait(&dev->power.wait_queue, &wait);
1330         }
1331 }
1332
1333 /**
1334  * pm_runtime_barrier - Flush pending requests and wait for completions.
1335  * @dev: Device to handle.
1336  *
1337  * Prevent the device from being suspended by incrementing its usage counter
1338  * and, if there's a pending resume request for the device, wake the device up.
1339  * Next, make sure that all pending requests for the device have been flushed
1340  * from pm_wq and wait for all runtime PM operations involving the device in
1341  * progress to complete.
1342  *
1343  * Return value:
1344  * 1, if there was a resume request pending and the device had to be woken up,
1345  * 0, otherwise
1346  */
1347 int pm_runtime_barrier(struct device *dev)
1348 {
1349         int retval = 0;
1350
1351         pm_runtime_get_noresume(dev);
1352         spin_lock_irq(&dev->power.lock);
1353
1354         if (dev->power.request_pending
1355             && dev->power.request == RPM_REQ_RESUME) {
1356                 rpm_resume(dev, 0);
1357                 retval = 1;
1358         }
1359
1360         __pm_runtime_barrier(dev);
1361
1362         spin_unlock_irq(&dev->power.lock);
1363         pm_runtime_put_noidle(dev);
1364
1365         return retval;
1366 }
1367 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1368
1369 /**
1370  * __pm_runtime_disable - Disable runtime PM of a device.
1371  * @dev: Device to handle.
1372  * @check_resume: If set, check if there's a resume request for the device.
1373  *
1374  * Increment power.disable_depth for the device and if it was zero previously,
1375  * cancel all pending runtime PM requests for the device and wait for all
1376  * operations in progress to complete.  The device can be either active or
1377  * suspended after its runtime PM has been disabled.
1378  *
1379  * If @check_resume is set and there's a resume request pending when
1380  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1381  * function will wake up the device before disabling its runtime PM.
1382  */
1383 void __pm_runtime_disable(struct device *dev, bool check_resume)
1384 {
1385         spin_lock_irq(&dev->power.lock);
1386
1387         if (dev->power.disable_depth > 0) {
1388                 dev->power.disable_depth++;
1389                 goto out;
1390         }
1391
1392         /*
1393          * Wake up the device if there's a resume request pending, because that
1394          * means there probably is some I/O to process and disabling runtime PM
1395          * shouldn't prevent the device from processing the I/O.
1396          */
1397         if (check_resume && dev->power.request_pending
1398             && dev->power.request == RPM_REQ_RESUME) {
1399                 /*
1400                  * Prevent suspends and idle notifications from being carried
1401                  * out after we have woken up the device.
1402                  */
1403                 pm_runtime_get_noresume(dev);
1404
1405                 rpm_resume(dev, 0);
1406
1407                 pm_runtime_put_noidle(dev);
1408         }
1409
1410         /* Update time accounting before disabling PM-runtime. */
1411         update_pm_runtime_accounting(dev);
1412
1413         if (!dev->power.disable_depth++)
1414                 __pm_runtime_barrier(dev);
1415
1416  out:
1417         spin_unlock_irq(&dev->power.lock);
1418 }
1419 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1420
1421 /**
1422  * pm_runtime_enable - Enable runtime PM of a device.
1423  * @dev: Device to handle.
1424  */
1425 void pm_runtime_enable(struct device *dev)
1426 {
1427         unsigned long flags;
1428
1429         spin_lock_irqsave(&dev->power.lock, flags);
1430
1431         if (dev->power.disable_depth > 0) {
1432                 dev->power.disable_depth--;
1433
1434                 /* About to enable runtime PM, set accounting_timestamp to now. */
1435                 if (!dev->power.disable_depth)
1436                         dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1437         } else {
1438                 dev_warn(dev, "Unbalanced %s!\n", __func__);
1439         }
1440
1441         WARN(!dev->power.disable_depth &&
1442              dev->power.runtime_status == RPM_SUSPENDED &&
1443              !dev->power.ignore_children &&
1444              atomic_read(&dev->power.child_count) > 0,
1445              "Enabling runtime PM for inactive device (%s) with active children\n",
1446              dev_name(dev));
1447
1448         spin_unlock_irqrestore(&dev->power.lock, flags);
1449 }
1450 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1451
1452 /**
1453  * pm_runtime_forbid - Block runtime PM of a device.
1454  * @dev: Device to handle.
1455  *
1456  * Increase the device's usage count and clear its power.runtime_auto flag,
1457  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1458  * for it.
1459  */
1460 void pm_runtime_forbid(struct device *dev)
1461 {
1462         spin_lock_irq(&dev->power.lock);
1463         if (!dev->power.runtime_auto)
1464                 goto out;
1465
1466         dev->power.runtime_auto = false;
1467         atomic_inc(&dev->power.usage_count);
1468         rpm_resume(dev, 0);
1469
1470  out:
1471         spin_unlock_irq(&dev->power.lock);
1472 }
1473 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1474
1475 /**
1476  * pm_runtime_allow - Unblock runtime PM of a device.
1477  * @dev: Device to handle.
1478  *
1479  * Decrease the device's usage count and set its power.runtime_auto flag.
1480  */
1481 void pm_runtime_allow(struct device *dev)
1482 {
1483         spin_lock_irq(&dev->power.lock);
1484         if (dev->power.runtime_auto)
1485                 goto out;
1486
1487         dev->power.runtime_auto = true;
1488         if (atomic_dec_and_test(&dev->power.usage_count))
1489                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1490         else
1491                 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
1492
1493  out:
1494         spin_unlock_irq(&dev->power.lock);
1495 }
1496 EXPORT_SYMBOL_GPL(pm_runtime_allow);
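/*
 * Illustrative sketch (not part of this file): pm_runtime_forbid() and
 * pm_runtime_allow() are normally driven by the "power/control" sysfs
 * attribute, but a driver may also call them directly, for instance to keep
 * a device powered by default until user space opts in.  foo_probe() is a
 * hypothetical probe callback:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		pm_runtime_set_active(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		// Keep the device active until "auto" is written to
 *		// power/control (which ends up calling pm_runtime_allow()).
 *		pm_runtime_forbid(&pdev->dev);
 *		return 0;
 *	}
 *
 * Both helpers check runtime_auto first, so repeated calls in the same
 * direction adjust the usage counter only once.
 */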
1497
1498 /**
1499  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1500  * @dev: Device to handle.
1501  *
1502  * Set the power.no_callbacks flag, which tells the PM core that this
1503  * device is power-managed through its parent and has no runtime PM
1504  * callbacks of its own.  The runtime sysfs attributes will be removed.
1505  */
1506 void pm_runtime_no_callbacks(struct device *dev)
1507 {
1508         spin_lock_irq(&dev->power.lock);
1509         dev->power.no_callbacks = 1;
1510         spin_unlock_irq(&dev->power.lock);
1511         if (device_is_registered(dev))
1512                 rpm_sysfs_remove(dev);
1513 }
1514 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
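/*
 * Illustrative sketch (not part of this file): a subsystem that registers
 * child devices with no power management of their own might mark them with
 * pm_runtime_no_callbacks() right after registration, so runtime PM requests
 * for the children are short-circuited to the parent.  foo_add_port() and its
 * arguments are hypothetical:
 *
 *	static int foo_add_port(struct device *parent, struct device *child)
 *	{
 *		int ret = device_register(child);
 *
 *		if (ret)
 *			return ret;
 *		// The port is powered through its parent controller.
 *		pm_runtime_no_callbacks(child);
 *		pm_runtime_enable(child);
 *		return 0;
 *	}
 */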
1515
1516 /**
1517  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1518  * @dev: Device to handle
1519  *
1520  * Set the power.irq_safe flag, which tells the PM core that the
1521  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1522  * always be invoked with the spinlock held and interrupts disabled.  It also
1523  * causes the parent's usage counter to be permanently incremented, preventing
1524  * the parent from runtime suspending -- otherwise an irq-safe child might have
1525  * to wait for a non-irq-safe parent.
1526  */
1527 void pm_runtime_irq_safe(struct device *dev)
1528 {
1529         if (dev->parent)
1530                 pm_runtime_get_sync(dev->parent);
1531         spin_lock_irq(&dev->power.lock);
1532         dev->power.irq_safe = 1;
1533         spin_unlock_irq(&dev->power.lock);
1534 }
1535 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
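/*
 * Illustrative sketch (not part of this file): once pm_runtime_irq_safe() has
 * been called for a device, its synchronous runtime PM helpers may be used in
 * atomic context, e.g. from an interrupt handler.  foo_irq() is a hypothetical
 * handler, and the device's own ->runtime_suspend()/->runtime_resume()
 * callbacks must be safe to run with interrupts disabled for this to work:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_runtime_get_sync(dev);	// allowed here because irq_safe is set
 *		// ... access the hardware ...
 *		pm_runtime_put(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 * Note the cost described above: the parent stays runtime-resumed for as long
 * as the child is marked IRQ-safe.
 */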
1536
1537 /**
1538  * update_autosuspend - Handle a change to a device's autosuspend settings.
1539  * @dev: Device to handle.
1540  * @old_delay: The former autosuspend_delay value.
1541  * @old_use: The former use_autosuspend value.
1542  *
1543  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1544  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1545  *
1546  * This function must be called under dev->power.lock with interrupts disabled.
1547  */
1548 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1549 {
1550         int delay = dev->power.autosuspend_delay;
1551
1552         /* Should runtime suspend be prevented now? */
1553         if (dev->power.use_autosuspend && delay < 0) {
1554
1555                 /* If it used to be allowed then prevent it. */
1556                 if (!old_use || old_delay >= 0) {
1557                         atomic_inc(&dev->power.usage_count);
1558                         rpm_resume(dev, 0);
1559                 } else {
1560                         trace_rpm_usage_rcuidle(dev, 0);
1561                 }
1562         }
1563
1564         /* Runtime suspend should be allowed now. */
1565         else {
1566
1567                 /* If it used to be prevented then allow it. */
1568                 if (old_use && old_delay < 0)
1569                         atomic_dec(&dev->power.usage_count);
1570
1571                 /* Maybe we can autosuspend now. */
1572                 rpm_idle(dev, RPM_AUTO);
1573         }
1574 }
1575
1576 /**
1577  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1578  * @dev: Device to handle.
1579  * @delay: Value of the new delay in milliseconds.
1580  *
1581  * Set the device's power.autosuspend_delay value.  If it changes to negative
1582  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1583  * changes the other way, allow runtime suspends.
1584  */
1585 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1586 {
1587         int old_delay, old_use;
1588
1589         spin_lock_irq(&dev->power.lock);
1590         old_delay = dev->power.autosuspend_delay;
1591         old_use = dev->power.use_autosuspend;
1592         dev->power.autosuspend_delay = delay;
1593         update_autosuspend(dev, old_delay, old_use);
1594         spin_unlock_irq(&dev->power.lock);
1595 }
1596 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
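/*
 * Illustrative sketch (not part of this file): with use_autosuspend set, a
 * negative delay blocks runtime suspend entirely, so a driver can toggle
 * autosuspend simply by flipping the sign of the delay.  The dev pointer is
 * hypothetical:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// suspend 2s after last use
 *	...
 *	pm_runtime_set_autosuspend_delay(dev, -1);	// temporarily block suspends
 *
 * A fuller autosuspend pattern is sketched after __pm_runtime_use_autosuspend()
 * below.
 */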
1597
1598 /**
1599  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1600  * @dev: Device to handle.
1601  * @use: New value for use_autosuspend.
1602  *
1603  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1604  * suspends as needed.
1605  */
1606 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1607 {
1608         int old_delay, old_use;
1609
1610         spin_lock_irq(&dev->power.lock);
1611         old_delay = dev->power.autosuspend_delay;
1612         old_use = dev->power.use_autosuspend;
1613         dev->power.use_autosuspend = use;
1614         update_autosuspend(dev, old_delay, old_use);
1615         spin_unlock_irq(&dev->power.lock);
1616 }
1617 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
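/*
 * Illustrative sketch (not part of this file): the usual autosuspend pattern
 * combines the two helpers above with pm_runtime_mark_last_busy() and
 * pm_runtime_put_autosuspend().  foo_probe() and foo_transfer() are
 * hypothetical driver functions; only the pm_runtime_*() calls are real API:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
 *		pm_runtime_use_autosuspend(&pdev->dev);
 *		pm_runtime_set_active(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 *
 *	static int foo_transfer(struct device *dev)
 *	{
 *		int ret = pm_runtime_get_sync(dev);
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		// ... do the I/O ...
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return 0;
 *	}
 *
 * pm_runtime_use_autosuspend() and pm_runtime_dont_use_autosuspend() are the
 * inline wrappers around __pm_runtime_use_autosuspend() declared in
 * <linux/pm_runtime.h>.
 */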
1618
1619 /**
1620  * pm_runtime_init - Initialize runtime PM fields in given device object.
1621  * @dev: Device object to initialize.
1622  */
1623 void pm_runtime_init(struct device *dev)
1624 {
1625         dev->power.runtime_status = RPM_SUSPENDED;
1626         dev->power.idle_notification = false;
1627
1628         dev->power.disable_depth = 1;
1629         atomic_set(&dev->power.usage_count, 0);
1630
1631         dev->power.runtime_error = 0;
1632
1633         atomic_set(&dev->power.child_count, 0);
1634         pm_suspend_ignore_children(dev, false);
1635         dev->power.runtime_auto = true;
1636
1637         dev->power.request_pending = false;
1638         dev->power.request = RPM_REQ_NONE;
1639         dev->power.deferred_resume = false;
1640         INIT_WORK(&dev->power.work, pm_runtime_work);
1641
1642         dev->power.timer_expires = 0;
1643         hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1644         dev->power.suspend_timer.function = pm_suspend_timer_fn;
1645
1646         init_waitqueue_head(&dev->power.wait_queue);
1647 }
1648
1649 /**
1650  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1651  * @dev: Device object to re-initialize.
1652  */
1653 void pm_runtime_reinit(struct device *dev)
1654 {
1655         if (!pm_runtime_enabled(dev)) {
1656                 if (dev->power.runtime_status == RPM_ACTIVE)
1657                         pm_runtime_set_suspended(dev);
1658                 if (dev->power.irq_safe) {
1659                         spin_lock_irq(&dev->power.lock);
1660                         dev->power.irq_safe = 0;
1661                         spin_unlock_irq(&dev->power.lock);
1662                         if (dev->parent)
1663                                 pm_runtime_put(dev->parent);
1664                 }
1665         }
1666 }
1667
1668 /**
1669  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1670  * @dev: Device object being removed from device hierarchy.
1671  */
1672 void pm_runtime_remove(struct device *dev)
1673 {
1674         __pm_runtime_disable(dev, false);
1675         pm_runtime_reinit(dev);
1676 }
1677
1678 /**
1679  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1680  * @dev: Consumer device.
1681  */
1682 void pm_runtime_get_suppliers(struct device *dev)
1683 {
1684         struct device_link *link;
1685         int idx;
1686
1687         idx = device_links_read_lock();
1688
1689         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1690                                 device_links_read_lock_held())
1691                 if (link->flags & DL_FLAG_PM_RUNTIME) {
1692                         link->supplier_preactivated = true;
1693                         pm_runtime_get_sync(link->supplier);
1694                         refcount_inc(&link->rpm_active);
1695                 }
1696
1697         device_links_read_unlock(idx);
1698 }
1699
1700 /**
1701  * pm_runtime_put_suppliers - Drop references to supplier devices.
1702  * @dev: Consumer device.
1703  */
1704 void pm_runtime_put_suppliers(struct device *dev)
1705 {
1706         struct device_link *link;
1707         unsigned long flags;
1708         bool put;
1709         int idx;
1710
1711         idx = device_links_read_lock();
1712
1713         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1714                                 device_links_read_lock_held())
1715                 if (link->supplier_preactivated) {
1716                         link->supplier_preactivated = false;
1717                         spin_lock_irqsave(&dev->power.lock, flags);
1718                         put = pm_runtime_status_suspended(dev) &&
1719                               refcount_dec_not_one(&link->rpm_active);
1720                         spin_unlock_irqrestore(&dev->power.lock, flags);
1721                         if (put)
1722                                 pm_runtime_put(link->supplier);
1723                 }
1724
1725         device_links_read_unlock(idx);
1726 }
1727
1728 void pm_runtime_new_link(struct device *dev)
1729 {
1730         spin_lock_irq(&dev->power.lock);
1731         dev->power.links_count++;
1732         spin_unlock_irq(&dev->power.lock);
1733 }
1734
1735 static void pm_runtime_drop_link_count(struct device *dev)
1736 {
1737         spin_lock_irq(&dev->power.lock);
1738         WARN_ON(dev->power.links_count == 0);
1739         dev->power.links_count--;
1740         spin_unlock_irq(&dev->power.lock);
1741 }
1742
1743 /**
1744  * pm_runtime_drop_link - Prepare for device link removal.
1745  * @link: Device link going away.
1746  *
1747  * Drop the link count of the consumer end of @link and decrement the supplier
1748  * device's runtime PM usage counter as many times as needed to drop all of the
1749  * PM-runtime references to it held by the consumer.
1750  */
1751 void pm_runtime_drop_link(struct device_link *link)
1752 {
1753         if (!(link->flags & DL_FLAG_PM_RUNTIME))
1754                 return;
1755
1756         pm_runtime_drop_link_count(link->consumer);
1757
1758         while (refcount_dec_not_one(&link->rpm_active))
1759                 pm_runtime_put(link->supplier);
1760 }
1761
1762 static bool pm_runtime_need_not_resume(struct device *dev)
1763 {
1764         return atomic_read(&dev->power.usage_count) <= 1 &&
1765                 (atomic_read(&dev->power.child_count) == 0 ||
1766                  dev->power.ignore_children);
1767 }
1768
1769 /**
1770  * pm_runtime_force_suspend - Force a device into suspend state if needed.
1771  * @dev: Device to suspend.
1772  *
1773  * Disable runtime PM so we can safely check the device's runtime PM status and,
1774  * if it is active, invoke its ->runtime_suspend callback to suspend it and
1775  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1776  * usage and children counters don't indicate that the device was in use before
1777  * the system-wide transition under way, decrement its parent's children counter
1778  * (if there is a parent).  Keep runtime PM disabled to preserve the state
1779  * unless we encounter errors.
1780  *
1781  * Typically, this function is invoked from a system suspend callback to make
1782  * sure the device is put into a low-power state.  It should only be used during
1783  * system-wide PM transitions to sleep states.  It assumes that the analogous
1784  * pm_runtime_force_resume() will be used to resume the device.
1785  */
1786 int pm_runtime_force_suspend(struct device *dev)
1787 {
1788         int (*callback)(struct device *);
1789         int ret;
1790
1791         pm_runtime_disable(dev);
1792         if (pm_runtime_status_suspended(dev))
1793                 return 0;
1794
1795         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1796
1797         ret = callback ? callback(dev) : 0;
1798         if (ret)
1799                 goto err;
1800
1801         /*
1802          * If the device can stay in suspend after the system-wide transition
1803          * to the working state that will follow, drop the children counter of
1804          * its parent, but set its status to RPM_SUSPENDED anyway in case this
1805          * function is called again for it in the meantime.
1806          */
1807         if (pm_runtime_need_not_resume(dev))
1808                 pm_runtime_set_suspended(dev);
1809         else
1810                 __update_runtime_status(dev, RPM_SUSPENDED);
1811
1812         return 0;
1813
1814 err:
1815         pm_runtime_enable(dev);
1816         return ret;
1817 }
1818 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
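/*
 * Illustrative sketch (not part of this file): a driver whose runtime PM
 * callbacks already put the hardware into its lowest-power state can reuse
 * them for system sleep by calling pm_runtime_force_suspend() from its system
 * suspend callback.  foo_suspend() is a hypothetical callback:
 *
 *	static int __maybe_unused foo_suspend(struct device *dev)
 *	{
 *		// Reuses the driver's ->runtime_suspend() callback and leaves
 *		// runtime PM disabled until pm_runtime_force_resume() runs.
 *		return pm_runtime_force_suspend(dev);
 *	}
 *
 * The matching resume-side wiring is sketched after pm_runtime_force_resume()
 * below.
 */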
1819
1820 /**
1821  * pm_runtime_force_resume - Force a device into resume state if needed.
1822  * @dev: Device to resume.
1823  *
1824  * Prior to invoking this function, the device is expected to have been brought
1825  * into a low-power state by a call to pm_runtime_force_suspend(). Here we reverse
1826  * those actions and bring the device back to full power, if it is expected to be
1827  * used on system resume.  Otherwise, the resume is deferred so that it can be
1828  * managed via runtime PM.
1829  *
1830  * Typically this function may be invoked from a system resume callback.
1831  */
1832 int pm_runtime_force_resume(struct device *dev)
1833 {
1834         int (*callback)(struct device *);
1835         int ret = 0;
1836
1837         if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1838                 goto out;
1839
1840         /*
1841          * The value of the parent's children counter is correct already, so
1842          * just update the status of the device.
1843          */
1844         __update_runtime_status(dev, RPM_ACTIVE);
1845
1846         callback = RPM_GET_CALLBACK(dev, runtime_resume);
1847
1848         ret = callback ? callback(dev) : 0;
1849         if (ret) {
1850                 pm_runtime_set_suspended(dev);
1851                 goto out;
1852         }
1853
1854         pm_runtime_mark_last_busy(dev);
1855 out:
1856         pm_runtime_enable(dev);
1857         return ret;
1858 }
1859 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
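/*
 * Illustrative sketch (not part of this file): the pair of helpers above is
 * commonly wired directly into a driver's dev_pm_ops, so the runtime PM
 * callbacks serve for system sleep as well.  foo_pm_ops, foo_runtime_suspend()
 * and foo_runtime_resume() are hypothetical; the macros are the standard ones
 * from <linux/pm.h>:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 *
 * With this arrangement the device is only brought back to full power on
 * system wakeup if it was in use (as judged by pm_runtime_need_not_resume())
 * when the system went to sleep; otherwise it stays suspended and is resumed
 * later by runtime PM.
 */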