PM-runtime: Add new interface to get accounted time
1 /*
2  * drivers/base/power/runtime.c - Helper functions for device runtime PM
3  *
4  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
6  *
7  * This file is released under the GPLv2.
8  */
9
10 #include <linux/sched/mm.h>
11 #include <linux/ktime.h>
12 #include <linux/hrtimer.h>
13 #include <linux/export.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/pm_wakeirq.h>
16 #include <trace/events/rpm.h>
17
18 #include "../base.h"
19 #include "power.h"
20
21 typedef int (*pm_callback_t)(struct device *);
22
23 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
24 {
25         pm_callback_t cb;
26         const struct dev_pm_ops *ops;
27
28         if (dev->pm_domain)
29                 ops = &dev->pm_domain->ops;
30         else if (dev->type && dev->type->pm)
31                 ops = dev->type->pm;
32         else if (dev->class && dev->class->pm)
33                 ops = dev->class->pm;
34         else if (dev->bus && dev->bus->pm)
35                 ops = dev->bus->pm;
36         else
37                 ops = NULL;
38
39         if (ops)
40                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
41         else
42                 cb = NULL;
43
44         if (!cb && dev->driver && dev->driver->pm)
45                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
46
47         return cb;
48 }
49
50 #define RPM_GET_CALLBACK(dev, callback) \
51                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
52
53 static int rpm_resume(struct device *dev, int rpmflags);
54 static int rpm_suspend(struct device *dev, int rpmflags);
55
56 /**
57  * update_pm_runtime_accounting - Update the time accounting of power states
58  * @dev: Device to update the accounting for
59  *
60  * In order to be able to have time accounting of the various power states
61  * (as used by programs such as PowerTOP to show the effectiveness of runtime
62  * PM), we need to track the time spent in each state.
63  * update_pm_runtime_accounting must be called each time before the
64  * runtime_status field is updated, to account the time in the old state
65  * correctly.
66  */
67 void update_pm_runtime_accounting(struct device *dev)
68 {
69         unsigned long now = jiffies;
70         unsigned long delta;
71
72         delta = now - dev->power.accounting_timestamp;
73
74         dev->power.accounting_timestamp = now;
75
76         if (dev->power.disable_depth > 0)
77                 return;
78
79         if (dev->power.runtime_status == RPM_SUSPENDED)
80                 dev->power.suspended_jiffies += delta;
81         else
82                 dev->power.active_jiffies += delta;
83 }
84
85 static void __update_runtime_status(struct device *dev, enum rpm_status status)
86 {
87         update_pm_runtime_accounting(dev);
88         dev->power.runtime_status = status;
89 }
90
91 u64 pm_runtime_suspended_time(struct device *dev)
92 {
93         unsigned long flags, time;
94
95         spin_lock_irqsave(&dev->power.lock, flags);
96
97         update_pm_runtime_accounting(dev);
98         time = dev->power.suspended_jiffies;
99
100         spin_unlock_irqrestore(&dev->power.lock, flags);
101
102         return jiffies_to_nsecs(time);
103 }
104 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
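
/*
 * Illustrative sketch (not part of the original file): how a caller, for
 * instance a PM domain governor or a diagnostic helper, might consume the
 * new accounted-time interface added by this patch.  The helper name and
 * the reporting format are assumptions made for the example.
 */
static inline void example_report_suspended_time(struct device *dev)
{
        /* Accumulated time spent in RPM_SUSPENDED, returned in nanoseconds. */
        u64 ns = pm_runtime_suspended_time(dev);

        dev_info(dev, "runtime-suspended for %llu ms so far\n",
                 ns / NSEC_PER_MSEC);
}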
105
106 /**
107  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
108  * @dev: Device to handle.
109  */
110 static void pm_runtime_deactivate_timer(struct device *dev)
111 {
112         if (dev->power.timer_expires > 0) {
113                 hrtimer_cancel(&dev->power.suspend_timer);
114                 dev->power.timer_expires = 0;
115         }
116 }
117
118 /**
119  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
120  * @dev: Device to handle.
121  */
122 static void pm_runtime_cancel_pending(struct device *dev)
123 {
124         pm_runtime_deactivate_timer(dev);
125         /*
126          * In case there's a request pending, make sure its work function will
127          * return without doing anything.
128          */
129         dev->power.request = RPM_REQ_NONE;
130 }
131
132 /**
133  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
134  * @dev: Device to handle.
135  *
136  * Compute the autosuspend-delay expiration time based on the device's
137  * power.last_busy time.  If the delay has already expired or is disabled
138  * (negative) or the power.use_autosuspend flag isn't set, return 0.
139  * Otherwise return the expiration time in nanoseconds.
140  *
141  * This function may be called either with or without dev->power.lock held.
142  * Either way it can be racy, since power.last_busy may be updated at any time.
143  */
144 u64 pm_runtime_autosuspend_expiration(struct device *dev)
145 {
146         int autosuspend_delay;
147         u64 last_busy, expires = 0;
148         u64 now = ktime_to_ns(ktime_get());
149
150         if (!dev->power.use_autosuspend)
151                 goto out;
152
153         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
154         if (autosuspend_delay < 0)
155                 goto out;
156
157         last_busy = READ_ONCE(dev->power.last_busy);
158
159         expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
160         if (expires <= now)
161                 expires = 0;    /* Already expired. */
162
163  out:
164         return expires;
165 }
166 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
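
/*
 * Illustrative sketch (hypothetical driver code, not an existing helper):
 * the driver-side idiom that makes the expiration time above meaningful.
 * After finishing a piece of work, the driver refreshes power.last_busy and
 * drops its usage count with the autosuspend variant, so the actual suspend
 * is deferred until the autosuspend delay expires.
 */
static inline void example_finish_io(struct device *dev)
{
        pm_runtime_mark_last_busy(dev);         /* update power.last_busy */
        pm_runtime_put_autosuspend(dev);        /* may schedule an autosuspend */
}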
167
168 static int dev_memalloc_noio(struct device *dev, void *data)
169 {
170         return dev->power.memalloc_noio;
171 }
172
173 /**
174  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
175  * @dev: Device to handle.
176  * @enable: True for setting the flag and False for clearing the flag.
177  *
178  * Set the flag for all devices in the path from the device to the
179  * root device in the device tree if @enable is true, otherwise clear
180  * the flag for devices in the path whose siblings don't set the flag.
181  *
182  * This function should only be called by block device or network
183  * device drivers, to solve the deadlock problem during runtime
184  * resume/suspend:
185  *
186  *     If memory allocation with GFP_KERNEL is called inside the runtime
187  *     resume/suspend callback of any one of its ancestors (or the
188  *     block device itself), a deadlock may be triggered inside the
189  *     memory allocation, since it might not complete until the block
190  *     device becomes active and the involved page I/O finishes. This
191  *     situation was first pointed out by Alan Stern. Network devices
192  *     are involved in iSCSI-like situations.
193  *
194  * dev_hotplug_mutex is held in this function to handle the hotplug
195  * race, because pm_runtime_set_memalloc_noio() may be called in
196  * async probe().
197  *
198  * This function should be called between device_add() and device_del()
199  * on the affected (block/network) device.
200  */
201 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
202 {
203         static DEFINE_MUTEX(dev_hotplug_mutex);
204
205         mutex_lock(&dev_hotplug_mutex);
206         for (;;) {
207                 bool enabled;
208
209                 /* hold power lock since bitfield is not SMP-safe. */
210                 spin_lock_irq(&dev->power.lock);
211                 enabled = dev->power.memalloc_noio;
212                 dev->power.memalloc_noio = enable;
213                 spin_unlock_irq(&dev->power.lock);
214
215                 /*
216                  * No need to enable the ancestors any more if the device
217                  * has already been enabled.
218                  */
219                 if (enabled && enable)
220                         break;
221
222                 dev = dev->parent;
223
224                 /*
225                  * Clear the flag of the parent device only if none of
226                  * its children has the flag set, because the ancestor's
227                  * flag may have been set by any one of its descendants.
228                  */
229                 if (!dev || (!enable &&
230                              device_for_each_child(dev, NULL,
231                                                    dev_memalloc_noio)))
232                         break;
233         }
234         mutex_unlock(&dev_hotplug_mutex);
235 }
236 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
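
/*
 * Illustrative sketch (hypothetical driver code): as described above, a
 * block or network device driver sets the flag right after device_add()
 * and clears it again before device_del().
 */
static inline void example_blk_register(struct device *dev)
{
        /* device_add(dev) has already succeeded at this point. */
        pm_runtime_set_memalloc_noio(dev, true);
}

static inline void example_blk_unregister(struct device *dev)
{
        pm_runtime_set_memalloc_noio(dev, false);
        /* device_del(dev) follows. */
}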
237
238 /**
239  * rpm_check_suspend_allowed - Test whether a device may be suspended.
240  * @dev: Device to test.
241  */
242 static int rpm_check_suspend_allowed(struct device *dev)
243 {
244         int retval = 0;
245
246         if (dev->power.runtime_error)
247                 retval = -EINVAL;
248         else if (dev->power.disable_depth > 0)
249                 retval = -EACCES;
250         else if (atomic_read(&dev->power.usage_count) > 0)
251                 retval = -EAGAIN;
252         else if (!dev->power.ignore_children &&
253                         atomic_read(&dev->power.child_count))
254                 retval = -EBUSY;
255
256         /* Pending resume requests take precedence over suspends. */
257         else if ((dev->power.deferred_resume
258                         && dev->power.runtime_status == RPM_SUSPENDING)
259             || (dev->power.request_pending
260                         && dev->power.request == RPM_REQ_RESUME))
261                 retval = -EAGAIN;
262         else if (__dev_pm_qos_read_value(dev) == 0)
263                 retval = -EPERM;
264         else if (dev->power.runtime_status == RPM_SUSPENDED)
265                 retval = 1;
266
267         return retval;
268 }
269
270 static int rpm_get_suppliers(struct device *dev)
271 {
272         struct device_link *link;
273
274         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
275                 int retval;
276
277                 if (!(link->flags & DL_FLAG_PM_RUNTIME))
278                         continue;
279
280                 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
281                     link->rpm_active)
282                         continue;
283
284                 retval = pm_runtime_get_sync(link->supplier);
285                 /* Ignore suppliers with disabled runtime PM. */
286                 if (retval < 0 && retval != -EACCES) {
287                         pm_runtime_put_noidle(link->supplier);
288                         return retval;
289                 }
290                 link->rpm_active = true;
291         }
292         return 0;
293 }
294
295 static void rpm_put_suppliers(struct device *dev)
296 {
297         struct device_link *link;
298
299         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
300                 if (link->rpm_active &&
301                     READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
302                         pm_runtime_put(link->supplier);
303                         link->rpm_active = false;
304                 }
305 }
306
307 /**
308  * __rpm_callback - Run a given runtime PM callback for a given device.
309  * @cb: Runtime PM callback to run.
310  * @dev: Device to run the callback for.
311  */
312 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
313         __releases(&dev->power.lock) __acquires(&dev->power.lock)
314 {
315         int retval, idx;
316         bool use_links = dev->power.links_count > 0;
317
318         if (dev->power.irq_safe) {
319                 spin_unlock(&dev->power.lock);
320         } else {
321                 spin_unlock_irq(&dev->power.lock);
322
323                 /*
324                  * Resume suppliers if necessary.
325                  *
326                  * The device's runtime PM status cannot change until this
327                  * routine returns, so it is safe to read the status outside of
328                  * the lock.
329                  */
330                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
331                         idx = device_links_read_lock();
332
333                         retval = rpm_get_suppliers(dev);
334                         if (retval)
335                                 goto fail;
336
337                         device_links_read_unlock(idx);
338                 }
339         }
340
341         retval = cb(dev);
342
343         if (dev->power.irq_safe) {
344                 spin_lock(&dev->power.lock);
345         } else {
346                 /*
347                  * If the device is suspending and the callback has returned
348                  * success, drop the usage counters of the suppliers that have
349                  * been reference counted on its resume.
350                  *
351                  * Do that if resume fails too.
352                  */
353                 if (use_links
354                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
355                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
356                         idx = device_links_read_lock();
357
358  fail:
359                         rpm_put_suppliers(dev);
360
361                         device_links_read_unlock(idx);
362                 }
363
364                 spin_lock_irq(&dev->power.lock);
365         }
366
367         return retval;
368 }
369
370 /**
371  * rpm_idle - Notify device bus type if the device can be suspended.
372  * @dev: Device to notify the bus type about.
373  * @rpmflags: Flag bits.
374  *
375  * Check if the device's runtime PM status allows it to be suspended.  If
376  * another idle notification has been started earlier, return immediately.  If
377  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
378  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
379  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
380  *
381  * This function must be called under dev->power.lock with interrupts disabled.
382  */
383 static int rpm_idle(struct device *dev, int rpmflags)
384 {
385         int (*callback)(struct device *);
386         int retval;
387
388         trace_rpm_idle_rcuidle(dev, rpmflags);
389         retval = rpm_check_suspend_allowed(dev);
390         if (retval < 0)
391                 ;       /* Conditions are wrong. */
392
393         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
394         else if (dev->power.runtime_status != RPM_ACTIVE)
395                 retval = -EAGAIN;
396
397         /*
398          * Any pending request other than an idle notification takes
399          * precedence over us, except that the timer may be running.
400          */
401         else if (dev->power.request_pending &&
402             dev->power.request > RPM_REQ_IDLE)
403                 retval = -EAGAIN;
404
405         /* Act as though RPM_NOWAIT is always set. */
406         else if (dev->power.idle_notification)
407                 retval = -EINPROGRESS;
408         if (retval)
409                 goto out;
410
411         /* Pending requests need to be canceled. */
412         dev->power.request = RPM_REQ_NONE;
413
414         if (dev->power.no_callbacks)
415                 goto out;
416
417         /* Carry out an asynchronous or a synchronous idle notification. */
418         if (rpmflags & RPM_ASYNC) {
419                 dev->power.request = RPM_REQ_IDLE;
420                 if (!dev->power.request_pending) {
421                         dev->power.request_pending = true;
422                         queue_work(pm_wq, &dev->power.work);
423                 }
424                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
425                 return 0;
426         }
427
428         dev->power.idle_notification = true;
429
430         callback = RPM_GET_CALLBACK(dev, runtime_idle);
431
432         if (callback)
433                 retval = __rpm_callback(callback, dev);
434
435         dev->power.idle_notification = false;
436         wake_up_all(&dev->power.wait_queue);
437
438  out:
439         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
440         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
441 }
442
443 /**
444  * rpm_callback - Run a given runtime PM callback for a given device.
445  * @cb: Runtime PM callback to run.
446  * @dev: Device to run the callback for.
447  */
448 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
449 {
450         int retval;
451
452         if (!cb)
453                 return -ENOSYS;
454
455         if (dev->power.memalloc_noio) {
456                 unsigned int noio_flag;
457
458                 /*
459                  * Deadlock might be caused if memory allocation with
460                  * GFP_KERNEL happens inside runtime_suspend and
461                  * runtime_resume callbacks of one block device's
462                  * ancestor or the block device itself. Network
463                  * device might be thought as part of iSCSI block
464                  * device, so network device and its ancestor should
465                  * be marked as memalloc_noio too.
466                  */
467                 noio_flag = memalloc_noio_save();
468                 retval = __rpm_callback(cb, dev);
469                 memalloc_noio_restore(noio_flag);
470         } else {
471                 retval = __rpm_callback(cb, dev);
472         }
473
474         dev->power.runtime_error = retval;
475         return retval != -EACCES ? retval : -EIO;
476 }
477
478 /**
479  * rpm_suspend - Carry out runtime suspend of given device.
480  * @dev: Device to suspend.
481  * @rpmflags: Flag bits.
482  *
483  * Check if the device's runtime PM status allows it to be suspended.
484  * Cancel a pending idle notification, autosuspend or suspend. If
485  * another suspend has been started earlier, either return immediately
486  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
487  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
488  * otherwise run the ->runtime_suspend() callback directly. If the
489  * callback succeeds and a deferred resume was requested while it was
490  * running, carry that resume out; otherwise send an idle notification
491  * for the device's parent (provided the suspend succeeded and neither
492  * ignore_children of parent->power nor irq_safe of dev->power is set).
493  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
494  * flag is set and the next autosuspend-delay expiration time is in the
495  * future, schedule another autosuspend attempt.
496  *
497  * This function must be called under dev->power.lock with interrupts disabled.
498  */
499 static int rpm_suspend(struct device *dev, int rpmflags)
500         __releases(&dev->power.lock) __acquires(&dev->power.lock)
501 {
502         int (*callback)(struct device *);
503         struct device *parent = NULL;
504         int retval;
505
506         trace_rpm_suspend_rcuidle(dev, rpmflags);
507
508  repeat:
509         retval = rpm_check_suspend_allowed(dev);
510
511         if (retval < 0)
512                 ;       /* Conditions are wrong. */
513
514         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
515         else if (dev->power.runtime_status == RPM_RESUMING &&
516             !(rpmflags & RPM_ASYNC))
517                 retval = -EAGAIN;
518         if (retval)
519                 goto out;
520
521         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
522         if ((rpmflags & RPM_AUTO)
523             && dev->power.runtime_status != RPM_SUSPENDING) {
524                 u64 expires = pm_runtime_autosuspend_expiration(dev);
525
526                 if (expires != 0) {
527                         /* Pending requests need to be canceled. */
528                         dev->power.request = RPM_REQ_NONE;
529
530                         /*
531                          * Optimization: If the timer is already running and is
532                          * set to expire at or before the autosuspend delay,
533                          * avoid the overhead of resetting it.  Just let it
534                          * expire; pm_suspend_timer_fn() will take care of the
535                          * rest.
536                          */
537                         if (!(dev->power.timer_expires &&
538                                         dev->power.timer_expires <= expires)) {
539                                 /*
540                                  * We add a slack of 25% to gather wakeups
541                                  * without sacrificing the granularity.
542                                  */
543                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
544                                                     (NSEC_PER_MSEC >> 2);
545
546                                 dev->power.timer_expires = expires;
547                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
548                                                 ns_to_ktime(expires),
549                                                 slack,
550                                                 HRTIMER_MODE_ABS);
551                         }
552                         dev->power.timer_autosuspends = 1;
553                         goto out;
554                 }
555         }
556
557         /* Other scheduled or pending requests need to be canceled. */
558         pm_runtime_cancel_pending(dev);
559
560         if (dev->power.runtime_status == RPM_SUSPENDING) {
561                 DEFINE_WAIT(wait);
562
563                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
564                         retval = -EINPROGRESS;
565                         goto out;
566                 }
567
568                 if (dev->power.irq_safe) {
569                         spin_unlock(&dev->power.lock);
570
571                         cpu_relax();
572
573                         spin_lock(&dev->power.lock);
574                         goto repeat;
575                 }
576
577                 /* Wait for the other suspend running in parallel with us. */
578                 for (;;) {
579                         prepare_to_wait(&dev->power.wait_queue, &wait,
580                                         TASK_UNINTERRUPTIBLE);
581                         if (dev->power.runtime_status != RPM_SUSPENDING)
582                                 break;
583
584                         spin_unlock_irq(&dev->power.lock);
585
586                         schedule();
587
588                         spin_lock_irq(&dev->power.lock);
589                 }
590                 finish_wait(&dev->power.wait_queue, &wait);
591                 goto repeat;
592         }
593
594         if (dev->power.no_callbacks)
595                 goto no_callback;       /* Assume success. */
596
597         /* Carry out an asynchronous or a synchronous suspend. */
598         if (rpmflags & RPM_ASYNC) {
599                 dev->power.request = (rpmflags & RPM_AUTO) ?
600                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
601                 if (!dev->power.request_pending) {
602                         dev->power.request_pending = true;
603                         queue_work(pm_wq, &dev->power.work);
604                 }
605                 goto out;
606         }
607
608         __update_runtime_status(dev, RPM_SUSPENDING);
609
610         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
611
612         dev_pm_enable_wake_irq_check(dev, true);
613         retval = rpm_callback(callback, dev);
614         if (retval)
615                 goto fail;
616
617  no_callback:
618         __update_runtime_status(dev, RPM_SUSPENDED);
619         pm_runtime_deactivate_timer(dev);
620
621         if (dev->parent) {
622                 parent = dev->parent;
623                 atomic_add_unless(&parent->power.child_count, -1, 0);
624         }
625         wake_up_all(&dev->power.wait_queue);
626
627         if (dev->power.deferred_resume) {
628                 dev->power.deferred_resume = false;
629                 rpm_resume(dev, 0);
630                 retval = -EAGAIN;
631                 goto out;
632         }
633
634         /* Maybe the parent is now able to suspend. */
635         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
636                 spin_unlock(&dev->power.lock);
637
638                 spin_lock(&parent->power.lock);
639                 rpm_idle(parent, RPM_ASYNC);
640                 spin_unlock(&parent->power.lock);
641
642                 spin_lock(&dev->power.lock);
643         }
644
645  out:
646         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
647
648         return retval;
649
650  fail:
651         dev_pm_disable_wake_irq_check(dev);
652         __update_runtime_status(dev, RPM_ACTIVE);
653         dev->power.deferred_resume = false;
654         wake_up_all(&dev->power.wait_queue);
655
656         if (retval == -EAGAIN || retval == -EBUSY) {
657                 dev->power.runtime_error = 0;
658
659                 /*
660                  * If the callback routine failed an autosuspend, and
661                  * if the last_busy time has been updated so that there
662                  * is a new autosuspend expiration time, automatically
663                  * reschedule another autosuspend.
664                  */
665                 if ((rpmflags & RPM_AUTO) &&
666                     pm_runtime_autosuspend_expiration(dev) != 0)
667                         goto repeat;
668         } else {
669                 pm_runtime_cancel_pending(dev);
670         }
671         goto out;
672 }
673
674 /**
675  * rpm_resume - Carry out runtime resume of given device.
676  * @dev: Device to resume.
677  * @rpmflags: Flag bits.
678  *
679  * Check if the device's runtime PM status allows it to be resumed.  Cancel
680  * any scheduled or pending requests.  If another resume has been started
681  * earlier, either return immediately or wait for it to finish, depending on the
682  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
683  * parallel with this function, either tell the other process to resume after
684  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
685  * flag is set then queue a resume request; otherwise run the
686  * ->runtime_resume() callback directly.  Queue an idle notification for the
687  * device if the resume succeeded.
688  *
689  * This function must be called under dev->power.lock with interrupts disabled.
690  */
691 static int rpm_resume(struct device *dev, int rpmflags)
692         __releases(&dev->power.lock) __acquires(&dev->power.lock)
693 {
694         int (*callback)(struct device *);
695         struct device *parent = NULL;
696         int retval = 0;
697
698         trace_rpm_resume_rcuidle(dev, rpmflags);
699
700  repeat:
701         if (dev->power.runtime_error)
702                 retval = -EINVAL;
703         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
704             && dev->power.runtime_status == RPM_ACTIVE)
705                 retval = 1;
706         else if (dev->power.disable_depth > 0)
707                 retval = -EACCES;
708         if (retval)
709                 goto out;
710
711         /*
712          * Other scheduled or pending requests need to be canceled.  Small
713          * optimization: If an autosuspend timer is running, leave it running
714          * rather than cancelling it now only to restart it again in the near
715          * future.
716          */
717         dev->power.request = RPM_REQ_NONE;
718         if (!dev->power.timer_autosuspends)
719                 pm_runtime_deactivate_timer(dev);
720
721         if (dev->power.runtime_status == RPM_ACTIVE) {
722                 retval = 1;
723                 goto out;
724         }
725
726         if (dev->power.runtime_status == RPM_RESUMING
727             || dev->power.runtime_status == RPM_SUSPENDING) {
728                 DEFINE_WAIT(wait);
729
730                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
731                         if (dev->power.runtime_status == RPM_SUSPENDING)
732                                 dev->power.deferred_resume = true;
733                         else
734                                 retval = -EINPROGRESS;
735                         goto out;
736                 }
737
738                 if (dev->power.irq_safe) {
739                         spin_unlock(&dev->power.lock);
740
741                         cpu_relax();
742
743                         spin_lock(&dev->power.lock);
744                         goto repeat;
745                 }
746
747                 /* Wait for the operation carried out in parallel with us. */
748                 for (;;) {
749                         prepare_to_wait(&dev->power.wait_queue, &wait,
750                                         TASK_UNINTERRUPTIBLE);
751                         if (dev->power.runtime_status != RPM_RESUMING
752                             && dev->power.runtime_status != RPM_SUSPENDING)
753                                 break;
754
755                         spin_unlock_irq(&dev->power.lock);
756
757                         schedule();
758
759                         spin_lock_irq(&dev->power.lock);
760                 }
761                 finish_wait(&dev->power.wait_queue, &wait);
762                 goto repeat;
763         }
764
765         /*
766          * See if we can skip waking up the parent.  This is safe only if
767          * power.no_callbacks is set, because otherwise we don't know whether
768          * the resume will actually succeed.
769          */
770         if (dev->power.no_callbacks && !parent && dev->parent) {
771                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
772                 if (dev->parent->power.disable_depth > 0
773                     || dev->parent->power.ignore_children
774                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
775                         atomic_inc(&dev->parent->power.child_count);
776                         spin_unlock(&dev->parent->power.lock);
777                         retval = 1;
778                         goto no_callback;       /* Assume success. */
779                 }
780                 spin_unlock(&dev->parent->power.lock);
781         }
782
783         /* Carry out an asynchronous or a synchronous resume. */
784         if (rpmflags & RPM_ASYNC) {
785                 dev->power.request = RPM_REQ_RESUME;
786                 if (!dev->power.request_pending) {
787                         dev->power.request_pending = true;
788                         queue_work(pm_wq, &dev->power.work);
789                 }
790                 retval = 0;
791                 goto out;
792         }
793
794         if (!parent && dev->parent) {
795                 /*
796                  * Increment the parent's usage counter and resume it if
797                  * necessary.  Not needed if dev is irq-safe; then the
798                  * parent is permanently resumed.
799                  */
800                 parent = dev->parent;
801                 if (dev->power.irq_safe)
802                         goto skip_parent;
803                 spin_unlock(&dev->power.lock);
804
805                 pm_runtime_get_noresume(parent);
806
807                 spin_lock(&parent->power.lock);
808                 /*
809                  * Resume the parent if it has runtime PM enabled and not been
810                  * set to ignore its children.
811                  */
812                 if (!parent->power.disable_depth
813                     && !parent->power.ignore_children) {
814                         rpm_resume(parent, 0);
815                         if (parent->power.runtime_status != RPM_ACTIVE)
816                                 retval = -EBUSY;
817                 }
818                 spin_unlock(&parent->power.lock);
819
820                 spin_lock(&dev->power.lock);
821                 if (retval)
822                         goto out;
823                 goto repeat;
824         }
825  skip_parent:
826
827         if (dev->power.no_callbacks)
828                 goto no_callback;       /* Assume success. */
829
830         __update_runtime_status(dev, RPM_RESUMING);
831
832         callback = RPM_GET_CALLBACK(dev, runtime_resume);
833
834         dev_pm_disable_wake_irq_check(dev);
835         retval = rpm_callback(callback, dev);
836         if (retval) {
837                 __update_runtime_status(dev, RPM_SUSPENDED);
838                 pm_runtime_cancel_pending(dev);
839                 dev_pm_enable_wake_irq_check(dev, false);
840         } else {
841  no_callback:
842                 __update_runtime_status(dev, RPM_ACTIVE);
843                 pm_runtime_mark_last_busy(dev);
844                 if (parent)
845                         atomic_inc(&parent->power.child_count);
846         }
847         wake_up_all(&dev->power.wait_queue);
848
849         if (retval >= 0)
850                 rpm_idle(dev, RPM_ASYNC);
851
852  out:
853         if (parent && !dev->power.irq_safe) {
854                 spin_unlock_irq(&dev->power.lock);
855
856                 pm_runtime_put(parent);
857
858                 spin_lock_irq(&dev->power.lock);
859         }
860
861         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
862
863         return retval;
864 }
865
866 /**
867  * pm_runtime_work - Universal runtime PM work function.
868  * @work: Work structure used for scheduling the execution of this function.
869  *
870  * Use @work to get the device object the work is to be done for, determine what
871  * is to be done and execute the appropriate runtime PM function.
872  */
873 static void pm_runtime_work(struct work_struct *work)
874 {
875         struct device *dev = container_of(work, struct device, power.work);
876         enum rpm_request req;
877
878         spin_lock_irq(&dev->power.lock);
879
880         if (!dev->power.request_pending)
881                 goto out;
882
883         req = dev->power.request;
884         dev->power.request = RPM_REQ_NONE;
885         dev->power.request_pending = false;
886
887         switch (req) {
888         case RPM_REQ_NONE:
889                 break;
890         case RPM_REQ_IDLE:
891                 rpm_idle(dev, RPM_NOWAIT);
892                 break;
893         case RPM_REQ_SUSPEND:
894                 rpm_suspend(dev, RPM_NOWAIT);
895                 break;
896         case RPM_REQ_AUTOSUSPEND:
897                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
898                 break;
899         case RPM_REQ_RESUME:
900                 rpm_resume(dev, RPM_NOWAIT);
901                 break;
902         }
903
904  out:
905         spin_unlock_irq(&dev->power.lock);
906 }
907
908 /**
909  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
910  * @timer: hrtimer used by pm_schedule_suspend().
911  *
912  * Check if the time is right and queue a suspend request.
913  */
914 static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
915 {
916         struct device *dev = container_of(timer, struct device, power.suspend_timer);
917         unsigned long flags;
918         u64 expires;
919
920         spin_lock_irqsave(&dev->power.lock, flags);
921
922         expires = dev->power.timer_expires;
923         /*
924          * If 'expires' is after the current time, we've been called
925          * too early.
926          */
927         if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
928                 dev->power.timer_expires = 0;
929                 rpm_suspend(dev, dev->power.timer_autosuspends ?
930                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
931         }
932
933         spin_unlock_irqrestore(&dev->power.lock, flags);
934
935         return HRTIMER_NORESTART;
936 }
937
938 /**
939  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
940  * @dev: Device to suspend.
941  * @delay: Time to wait before submitting a suspend request, in milliseconds.
942  */
943 int pm_schedule_suspend(struct device *dev, unsigned int delay)
944 {
945         unsigned long flags;
946         ktime_t expires;
947         int retval;
948
949         spin_lock_irqsave(&dev->power.lock, flags);
950
951         if (!delay) {
952                 retval = rpm_suspend(dev, RPM_ASYNC);
953                 goto out;
954         }
955
956         retval = rpm_check_suspend_allowed(dev);
957         if (retval)
958                 goto out;
959
960         /* Other scheduled or pending requests need to be canceled. */
961         pm_runtime_cancel_pending(dev);
962
963         expires = ktime_add(ktime_get(), ms_to_ktime(delay));
964         dev->power.timer_expires = ktime_to_ns(expires);
965         dev->power.timer_autosuspends = 0;
966         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
967
968  out:
969         spin_unlock_irqrestore(&dev->power.lock, flags);
970
971         return retval;
972 }
973 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
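
/*
 * Illustrative sketch (hypothetical caller): ask for a suspend attempt to
 * be made roughly five seconds from now.  The 5000 ms delay is an
 * assumption made for the example.
 */
static inline int example_schedule_idle_suspend(struct device *dev)
{
        return pm_schedule_suspend(dev, 5000);
}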
974
975 /**
976  * __pm_runtime_idle - Entry point for runtime idle operations.
977  * @dev: Device to send idle notification for.
978  * @rpmflags: Flag bits.
979  *
980  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
981  * return immediately if it is larger than zero.  Then carry out an idle
982  * notification, either synchronous or asynchronous.
983  *
984  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
985  * or if pm_runtime_irq_safe() has been called.
986  */
987 int __pm_runtime_idle(struct device *dev, int rpmflags)
988 {
989         unsigned long flags;
990         int retval;
991
992         if (rpmflags & RPM_GET_PUT) {
993                 if (!atomic_dec_and_test(&dev->power.usage_count))
994                         return 0;
995         }
996
997         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
998
999         spin_lock_irqsave(&dev->power.lock, flags);
1000         retval = rpm_idle(dev, rpmflags);
1001         spin_unlock_irqrestore(&dev->power.lock, flags);
1002
1003         return retval;
1004 }
1005 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1006
1007 /**
1008  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1009  * @dev: Device to suspend.
1010  * @rpmflags: Flag bits.
1011  *
1012  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1013  * return immediately if it is larger than zero.  Then carry out a suspend,
1014  * either synchronous or asynchronous.
1015  *
1016  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1017  * or if pm_runtime_irq_safe() has been called.
1018  */
1019 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1020 {
1021         unsigned long flags;
1022         int retval;
1023
1024         if (rpmflags & RPM_GET_PUT) {
1025                 if (!atomic_dec_and_test(&dev->power.usage_count))
1026                         return 0;
1027         }
1028
1029         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1030
1031         spin_lock_irqsave(&dev->power.lock, flags);
1032         retval = rpm_suspend(dev, rpmflags);
1033         spin_unlock_irqrestore(&dev->power.lock, flags);
1034
1035         return retval;
1036 }
1037 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1038
1039 /**
1040  * __pm_runtime_resume - Entry point for runtime resume operations.
1041  * @dev: Device to resume.
1042  * @rpmflags: Flag bits.
1043  *
1044  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1045  * carry out a resume, either synchronous or asynchronous.
1046  *
1047  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1048  * or if pm_runtime_irq_safe() has been called.
1049  */
1050 int __pm_runtime_resume(struct device *dev, int rpmflags)
1051 {
1052         unsigned long flags;
1053         int retval;
1054
1055         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1056                         dev->power.runtime_status != RPM_ACTIVE);
1057
1058         if (rpmflags & RPM_GET_PUT)
1059                 atomic_inc(&dev->power.usage_count);
1060
1061         spin_lock_irqsave(&dev->power.lock, flags);
1062         retval = rpm_resume(dev, rpmflags);
1063         spin_unlock_irqrestore(&dev->power.lock, flags);
1064
1065         return retval;
1066 }
1067 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
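
/*
 * Illustrative sketch (hypothetical driver code): the usual pattern built on
 * top of the entry points above.  pm_runtime_get_sync() maps to
 * __pm_runtime_resume(dev, RPM_GET_PUT) and resumes the device synchronously;
 * pm_runtime_put() maps to __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC).
 */
static inline int example_do_io(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* keep the usage count balanced */
                return ret;
        }

        /* ... access the hardware here ... */

        pm_runtime_put(dev);
        return 0;
}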
1068
1069 /**
1070  * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1071  * @dev: Device to handle.
1072  *
1073  * Return -EINVAL if runtime PM is disabled for the device.
1074  *
1075  * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1076  * and the runtime PM usage counter is nonzero, increment the counter and
1077  * return 1.  Otherwise return 0 without changing the counter.
1078  */
1079 int pm_runtime_get_if_in_use(struct device *dev)
1080 {
1081         unsigned long flags;
1082         int retval;
1083
1084         spin_lock_irqsave(&dev->power.lock, flags);
1085         retval = dev->power.disable_depth > 0 ? -EINVAL :
1086                 dev->power.runtime_status == RPM_ACTIVE
1087                         && atomic_inc_not_zero(&dev->power.usage_count);
1088         spin_unlock_irqrestore(&dev->power.lock, flags);
1089         return retval;
1090 }
1091 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
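
/*
 * Illustrative sketch (hypothetical driver code): touch the hardware only
 * if it is already active and in use, without forcing a resume.
 */
static inline void example_opportunistic_poll(struct device *dev)
{
        if (pm_runtime_get_if_in_use(dev) <= 0)
                return;         /* suspended, idle, or runtime PM disabled */

        /* ... read a few registers ... */

        pm_runtime_put(dev);
}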
1092
1093 /**
1094  * __pm_runtime_set_status - Set runtime PM status of a device.
1095  * @dev: Device to handle.
1096  * @status: New runtime PM status of the device.
1097  *
1098  * If runtime PM of the device is disabled or its power.runtime_error field is
1099  * different from zero, the status may be changed either to RPM_ACTIVE, or to
1100  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1101  * However, if the device has a parent and the parent is not active, and the
1102  * parent's power.ignore_children flag is unset, the device's status cannot be
1103  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1104  *
1105  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1106  * and the device parent's counter of unsuspended children is modified to
1107  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1108  * notification request for the parent is submitted.
1109  */
1110 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1111 {
1112         struct device *parent = dev->parent;
1113         unsigned long flags;
1114         bool notify_parent = false;
1115         int error = 0;
1116
1117         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1118                 return -EINVAL;
1119
1120         spin_lock_irqsave(&dev->power.lock, flags);
1121
1122         if (!dev->power.runtime_error && !dev->power.disable_depth) {
1123                 error = -EAGAIN;
1124                 goto out;
1125         }
1126
1127         if (dev->power.runtime_status == status || !parent)
1128                 goto out_set;
1129
1130         if (status == RPM_SUSPENDED) {
1131                 atomic_add_unless(&parent->power.child_count, -1, 0);
1132                 notify_parent = !parent->power.ignore_children;
1133         } else {
1134                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1135
1136                 /*
1137                  * It is invalid to put an active child under a parent that is
1138                  * not active, has runtime PM enabled and the
1139                  * 'power.ignore_children' flag unset.
1140                  */
1141                 if (!parent->power.disable_depth
1142                     && !parent->power.ignore_children
1143                     && parent->power.runtime_status != RPM_ACTIVE) {
1144                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1145                                 dev_name(dev),
1146                                 dev_name(parent));
1147                         error = -EBUSY;
1148                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1149                         atomic_inc(&parent->power.child_count);
1150                 }
1151
1152                 spin_unlock(&parent->power.lock);
1153
1154                 if (error)
1155                         goto out;
1156         }
1157
1158  out_set:
1159         __update_runtime_status(dev, status);
1160         dev->power.runtime_error = 0;
1161  out:
1162         spin_unlock_irqrestore(&dev->power.lock, flags);
1163
1164         if (notify_parent)
1165                 pm_request_idle(parent);
1166
1167         return error;
1168 }
1169 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
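
/*
 * Illustrative sketch (hypothetical probe code): a driver whose device was
 * left powered on by the bootloader reports that state to the core before
 * enabling runtime PM, so the status matches the hardware.
 * pm_runtime_set_active() is __pm_runtime_set_status(dev, RPM_ACTIVE).
 */
static inline int example_probe_powered_on(struct device *dev)
{
        int ret;

        ret = pm_runtime_set_active(dev);
        if (ret)
                return ret;

        pm_runtime_enable(dev);
        return 0;
}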
1170
1171 /**
1172  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1173  * @dev: Device to handle.
1174  *
1175  * Flush all pending requests for the device from pm_wq and wait for all
1176  * runtime PM operations involving the device in progress to complete.
1177  *
1178  * Should be called under dev->power.lock with interrupts disabled.
1179  */
1180 static void __pm_runtime_barrier(struct device *dev)
1181 {
1182         pm_runtime_deactivate_timer(dev);
1183
1184         if (dev->power.request_pending) {
1185                 dev->power.request = RPM_REQ_NONE;
1186                 spin_unlock_irq(&dev->power.lock);
1187
1188                 cancel_work_sync(&dev->power.work);
1189
1190                 spin_lock_irq(&dev->power.lock);
1191                 dev->power.request_pending = false;
1192         }
1193
1194         if (dev->power.runtime_status == RPM_SUSPENDING
1195             || dev->power.runtime_status == RPM_RESUMING
1196             || dev->power.idle_notification) {
1197                 DEFINE_WAIT(wait);
1198
1199                 /* Suspend, wake-up or idle notification in progress. */
1200                 for (;;) {
1201                         prepare_to_wait(&dev->power.wait_queue, &wait,
1202                                         TASK_UNINTERRUPTIBLE);
1203                         if (dev->power.runtime_status != RPM_SUSPENDING
1204                             && dev->power.runtime_status != RPM_RESUMING
1205                             && !dev->power.idle_notification)
1206                                 break;
1207                         spin_unlock_irq(&dev->power.lock);
1208
1209                         schedule();
1210
1211                         spin_lock_irq(&dev->power.lock);
1212                 }
1213                 finish_wait(&dev->power.wait_queue, &wait);
1214         }
1215 }
1216
1217 /**
1218  * pm_runtime_barrier - Flush pending requests and wait for completions.
1219  * @dev: Device to handle.
1220  *
1221  * Prevent the device from being suspended by incrementing its usage counter
1222  * and, if there's a pending resume request for the device, wake it up.
1223  * Next, make sure that all pending requests for the device have been flushed
1224  * from pm_wq and wait for all runtime PM operations involving the device in
1225  * progress to complete.
1226  *
1227  * Return value:
1228  * 1, if there was a resume request pending and the device had to be woken up,
1229  * 0, otherwise
1230  */
1231 int pm_runtime_barrier(struct device *dev)
1232 {
1233         int retval = 0;
1234
1235         pm_runtime_get_noresume(dev);
1236         spin_lock_irq(&dev->power.lock);
1237
1238         if (dev->power.request_pending
1239             && dev->power.request == RPM_REQ_RESUME) {
1240                 rpm_resume(dev, 0);
1241                 retval = 1;
1242         }
1243
1244         __pm_runtime_barrier(dev);
1245
1246         spin_unlock_irq(&dev->power.lock);
1247         pm_runtime_put_noidle(dev);
1248
1249         return retval;
1250 }
1251 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
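
/*
 * Illustrative sketch (hypothetical caller): make sure no asynchronous
 * runtime PM activity for the device is still in flight before tearing
 * driver state down, e.g. on an unbind path.
 */
static inline void example_quiesce(struct device *dev)
{
        /* Resume if a request was pending and flush the pm_wq work item. */
        if (pm_runtime_barrier(dev))
                dev_dbg(dev, "pending resume request was carried out\n");
}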
1252
1253 /**
1254  * __pm_runtime_disable - Disable runtime PM of a device.
1255  * @dev: Device to handle.
1256  * @check_resume: If set, check if there's a resume request for the device.
1257  *
1258  * Increment power.disable_depth for the device and if it was zero previously,
1259  * cancel all pending runtime PM requests for the device and wait for all
1260  * operations in progress to complete.  The device can be either active or
1261  * suspended after its runtime PM has been disabled.
1262  *
1263  * If @check_resume is set and there's a resume request pending when
1264  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1265  * function will wake up the device before disabling its runtime PM.
1266  */
1267 void __pm_runtime_disable(struct device *dev, bool check_resume)
1268 {
1269         spin_lock_irq(&dev->power.lock);
1270
1271         if (dev->power.disable_depth > 0) {
1272                 dev->power.disable_depth++;
1273                 goto out;
1274         }
1275
1276         /*
1277          * Wake up the device if there's a resume request pending, because that
1278          * means there probably is some I/O to process and disabling runtime PM
1279          * shouldn't prevent the device from processing the I/O.
1280          */
1281         if (check_resume && dev->power.request_pending
1282             && dev->power.request == RPM_REQ_RESUME) {
1283                 /*
1284                  * Prevent suspends and idle notifications from being carried
1285                  * out after we have woken up the device.
1286                  */
1287                 pm_runtime_get_noresume(dev);
1288
1289                 rpm_resume(dev, 0);
1290
1291                 pm_runtime_put_noidle(dev);
1292         }
1293
1294         if (!dev->power.disable_depth++)
1295                 __pm_runtime_barrier(dev);
1296
1297  out:
1298         spin_unlock_irq(&dev->power.lock);
1299 }
1300 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1301
1302 /**
1303  * pm_runtime_enable - Enable runtime PM of a device.
1304  * @dev: Device to handle.
1305  */
1306 void pm_runtime_enable(struct device *dev)
1307 {
1308         unsigned long flags;
1309
1310         spin_lock_irqsave(&dev->power.lock, flags);
1311
1312         if (dev->power.disable_depth > 0)
1313                 dev->power.disable_depth--;
1314         else
1315                 dev_warn(dev, "Unbalanced %s!\n", __func__);
1316
1317         WARN(!dev->power.disable_depth &&
1318              dev->power.runtime_status == RPM_SUSPENDED &&
1319              !dev->power.ignore_children &&
1320              atomic_read(&dev->power.child_count) > 0,
1321              "Enabling runtime PM for inactive device (%s) with active children\n",
1322              dev_name(dev));
1323
1324         spin_unlock_irqrestore(&dev->power.lock, flags);
1325 }
1326 EXPORT_SYMBOL_GPL(pm_runtime_enable);
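
/*
 * Illustrative sketch (hypothetical driver code): every pm_runtime_enable()
 * must eventually be matched by a pm_runtime_disable(); calling enable more
 * often than disable triggers the "Unbalanced" warning above.
 */
static inline void example_remove(struct device *dev)
{
        pm_runtime_disable(dev);        /* pairs with pm_runtime_enable() in probe */
}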
1327
1328 /**
1329  * pm_runtime_forbid - Block runtime PM of a device.
1330  * @dev: Device to handle.
1331  *
1332  * Increase the device's usage count and clear its power.runtime_auto flag,
1333  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1334  * for it.
1335  */
1336 void pm_runtime_forbid(struct device *dev)
1337 {
1338         spin_lock_irq(&dev->power.lock);
1339         if (!dev->power.runtime_auto)
1340                 goto out;
1341
1342         dev->power.runtime_auto = false;
1343         atomic_inc(&dev->power.usage_count);
1344         rpm_resume(dev, 0);
1345
1346  out:
1347         spin_unlock_irq(&dev->power.lock);
1348 }
1349 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1350
1351 /**
1352  * pm_runtime_allow - Unblock runtime PM of a device.
1353  * @dev: Device to handle.
1354  *
1355  * Decrease the device's usage count and set its power.runtime_auto flag.
1356  */
1357 void pm_runtime_allow(struct device *dev)
1358 {
1359         spin_lock_irq(&dev->power.lock);
1360         if (dev->power.runtime_auto)
1361                 goto out;
1362
1363         dev->power.runtime_auto = true;
1364         if (atomic_dec_and_test(&dev->power.usage_count))
1365                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1366
1367  out:
1368         spin_unlock_irq(&dev->power.lock);
1369 }
1370 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1371
1372 /**
1373  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1374  * @dev: Device to handle.
1375  *
1376  * Set the power.no_callbacks flag, which tells the PM core that this
1377  * device is power-managed through its parent and has no runtime PM
1378  * callbacks of its own.  The runtime sysfs attributes will be removed.
1379  */
1380 void pm_runtime_no_callbacks(struct device *dev)
1381 {
1382         spin_lock_irq(&dev->power.lock);
1383         dev->power.no_callbacks = 1;
1384         spin_unlock_irq(&dev->power.lock);
1385         if (device_is_registered(dev))
1386                 rpm_sysfs_remove(dev);
1387 }
1388 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1389
1390 /**
1391  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1392  * @dev: Device to handle
1393  *
1394  * Set the power.irq_safe flag, which tells the PM core that the
1395  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1396  * always be invoked with the spinlock held and interrupts disabled.  It also
1397  * causes the parent's usage counter to be permanently incremented, preventing
1398  * the parent from runtime suspending -- otherwise an irq-safe child might have
1399  * to wait for a non-irq-safe parent.
1400  */
1401 void pm_runtime_irq_safe(struct device *dev)
1402 {
1403         if (dev->parent)
1404                 pm_runtime_get_sync(dev->parent);
1405         spin_lock_irq(&dev->power.lock);
1406         dev->power.irq_safe = 1;
1407         spin_unlock_irq(&dev->power.lock);
1408 }
1409 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
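
/*
 * Illustrative sketch (hypothetical probe code): a driver whose
 * ->runtime_suspend() and ->runtime_resume() callbacks may safely run with
 * interrupts disabled opts in once at probe time.  Note the trade-off
 * described above: the parent is then kept active permanently.
 */
static inline void example_probe_irq_safe(struct device *dev)
{
        pm_runtime_irq_safe(dev);
        pm_runtime_enable(dev);
}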
1410
1411 /**
1412  * update_autosuspend - Handle a change to a device's autosuspend settings.
1413  * @dev: Device to handle.
1414  * @old_delay: The former autosuspend_delay value.
1415  * @old_use: The former use_autosuspend value.
1416  *
1417  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1418  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1419  *
1420  * This function must be called under dev->power.lock with interrupts disabled.
1421  */
1422 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1423 {
1424         int delay = dev->power.autosuspend_delay;
1425
1426         /* Should runtime suspend be prevented now? */
1427         if (dev->power.use_autosuspend && delay < 0) {
1428
1429                 /* If it used to be allowed then prevent it. */
1430                 if (!old_use || old_delay >= 0) {
1431                         atomic_inc(&dev->power.usage_count);
1432                         rpm_resume(dev, 0);
1433                 }
1434         }
1435
1436         /* Runtime suspend should be allowed now. */
1437         else {
1438
1439                 /* If it used to be prevented then allow it. */
1440                 if (old_use && old_delay < 0)
1441                         atomic_dec(&dev->power.usage_count);
1442
1443                 /* Maybe we can autosuspend now. */
1444                 rpm_idle(dev, RPM_AUTO);
1445         }
1446 }
1447
1448 /**
1449  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1450  * @dev: Device to handle.
1451  * @delay: Value of the new delay in milliseconds.
1452  *
1453  * Set the device's power.autosuspend_delay value.  If it changes to negative
1454  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1455  * changes the other way, allow runtime suspends.
1456  */
1457 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1458 {
1459         int old_delay, old_use;
1460
1461         spin_lock_irq(&dev->power.lock);
1462         old_delay = dev->power.autosuspend_delay;
1463         old_use = dev->power.use_autosuspend;
1464         dev->power.autosuspend_delay = delay;
1465         update_autosuspend(dev, old_delay, old_use);
1466         spin_unlock_irq(&dev->power.lock);
1467 }
1468 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1469
1470 /**
1471  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1472  * @dev: Device to handle.
1473  * @use: New value for use_autosuspend.
1474  *
1475  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1476  * suspends as needed.
1477  */
1478 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1479 {
1480         int old_delay, old_use;
1481
1482         spin_lock_irq(&dev->power.lock);
1483         old_delay = dev->power.autosuspend_delay;
1484         old_use = dev->power.use_autosuspend;
1485         dev->power.use_autosuspend = use;
1486         update_autosuspend(dev, old_delay, old_use);
1487         spin_unlock_irq(&dev->power.lock);
1488 }
1489 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
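
/*
 * Illustrative usage sketch (not part of this file): with use_autosuspend in
 * effect, a hypothetical driver's I/O path would typically take a usage
 * reference around each transfer and let the autosuspend timer run out
 * afterwards:
 *
 *	static int foo_xfer(struct device *dev)
 *	{
 *		int ret = pm_runtime_get_sync(dev);
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *
 *		ret = do_the_io(dev);
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return ret;
 *	}
 *
 * foo_xfer() and do_the_io() are made up for the example; the pm_runtime_*
 * helpers used here are the standard ones from include/linux/pm_runtime.h.
 */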
1490
1491 /**
1492  * pm_runtime_init - Initialize runtime PM fields in given device object.
1493  * @dev: Device object to initialize.
1494  */
1495 void pm_runtime_init(struct device *dev)
1496 {
1497         dev->power.runtime_status = RPM_SUSPENDED;
1498         dev->power.idle_notification = false;
1499
1500         dev->power.disable_depth = 1;
1501         atomic_set(&dev->power.usage_count, 0);
1502
1503         dev->power.runtime_error = 0;
1504
1505         atomic_set(&dev->power.child_count, 0);
1506         pm_suspend_ignore_children(dev, false);
1507         dev->power.runtime_auto = true;
1508
1509         dev->power.request_pending = false;
1510         dev->power.request = RPM_REQ_NONE;
1511         dev->power.deferred_resume = false;
1512         dev->power.accounting_timestamp = jiffies;
1513         INIT_WORK(&dev->power.work, pm_runtime_work);
1514
1515         dev->power.timer_expires = 0;
1516         hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1517         dev->power.suspend_timer.function = pm_suspend_timer_fn;
1518
1519         init_waitqueue_head(&dev->power.wait_queue);
1520 }
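
/*
 * Note (sketch, not part of this file): as initialized above, every device
 * starts out with runtime PM disabled (disable_depth == 1) and with
 * runtime_status == RPM_SUSPENDED.  A hypothetical driver whose hardware is
 * already powered at probe time would therefore typically do:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * in its probe path (balanced by pm_runtime_disable() on removal) before
 * relying on any of the other runtime PM helpers.
 */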
1521
1522 /**
1523  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1524  * @dev: Device object to re-initialize.
1525  */
1526 void pm_runtime_reinit(struct device *dev)
1527 {
1528         if (!pm_runtime_enabled(dev)) {
1529                 if (dev->power.runtime_status == RPM_ACTIVE)
1530                         pm_runtime_set_suspended(dev);
1531                 if (dev->power.irq_safe) {
1532                         spin_lock_irq(&dev->power.lock);
1533                         dev->power.irq_safe = 0;
1534                         spin_unlock_irq(&dev->power.lock);
1535                         if (dev->parent)
1536                                 pm_runtime_put(dev->parent);
1537                 }
1538         }
1539 }
1540
1541 /**
1542  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1543  * @dev: Device object being removed from device hierarchy.
1544  */
1545 void pm_runtime_remove(struct device *dev)
1546 {
1547         __pm_runtime_disable(dev, false);
1548         pm_runtime_reinit(dev);
1549 }
1550
1551 /**
1552  * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1553  * @dev: Device whose driver is going to be removed.
1554  *
1555  * Check links from this device to any consumers and, if any of them have
1556  * active runtime PM references to the device, drop the device's usage
1557  * counter once per such link.
1558  *
1559  * Links with the DL_FLAG_STATELESS flag set are ignored.
1560  *
1561  * Since the device is guaranteed to be runtime-active at the point this is
1562  * called, nothing else needs to be done here.
1563  *
1564  * Moreover, this is called after device_links_busy() has returned 'false', so
1565  * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1566  * therefore rpm_active can't be manipulated concurrently.
1567  */
1568 void pm_runtime_clean_up_links(struct device *dev)
1569 {
1570         struct device_link *link;
1571         int idx;
1572
1573         idx = device_links_read_lock();
1574
1575         list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
1576                 if (link->flags & DL_FLAG_STATELESS)
1577                         continue;
1578
1579                 if (link->rpm_active) {
1580                         pm_runtime_put_noidle(dev);
1581                         link->rpm_active = false;
1582                 }
1583         }
1584
1585         device_links_read_unlock(idx);
1586 }
1587
1588 /**
1589  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1590  * @dev: Consumer device.
1591  */
1592 void pm_runtime_get_suppliers(struct device *dev)
1593 {
1594         struct device_link *link;
1595         int idx;
1596
1597         idx = device_links_read_lock();
1598
1599         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1600                 if (link->flags & DL_FLAG_PM_RUNTIME)
1601                         pm_runtime_get_sync(link->supplier);
1602
1603         device_links_read_unlock(idx);
1604 }
1605
1606 /**
1607  * pm_runtime_put_suppliers - Drop references to supplier devices.
1608  * @dev: Consumer device.
1609  */
1610 void pm_runtime_put_suppliers(struct device *dev)
1611 {
1612         struct device_link *link;
1613         int idx;
1614
1615         idx = device_links_read_lock();
1616
1617         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1618                 if (link->flags & DL_FLAG_PM_RUNTIME)
1619                         pm_runtime_put(link->supplier);
1620
1621         device_links_read_unlock(idx);
1622 }
1623
1624 void pm_runtime_new_link(struct device *dev)
1625 {
1626         spin_lock_irq(&dev->power.lock);
1627         dev->power.links_count++;
1628         spin_unlock_irq(&dev->power.lock);
1629 }
1630
1631 void pm_runtime_drop_link(struct device *dev)
1632 {
1633         rpm_put_suppliers(dev);
1634
1635         spin_lock_irq(&dev->power.lock);
1636         WARN_ON(dev->power.links_count == 0);
1637         dev->power.links_count--;
1638         spin_unlock_irq(&dev->power.lock);
1639 }
1640
1641 static bool pm_runtime_need_not_resume(struct device *dev)
1642 {
1643         return atomic_read(&dev->power.usage_count) <= 1 &&
1644                 (atomic_read(&dev->power.child_count) == 0 ||
1645                  dev->power.ignore_children);
1646 }
1647
1648 /**
1649  * pm_runtime_force_suspend - Force a device into suspend state if needed.
1650  * @dev: Device to suspend.
1651  *
1652  * Disable runtime PM so we can safely check the device's runtime PM status and
1653  * if it is active, invoke its ->runtime_suspend callback to suspend it and
1654  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1655  * usage and children counters don't indicate that the device was in use before
1656  * the system-wide transition under way, decrement its parent's children counter
1657  * (if there is a parent).  Keep runtime PM disabled to preserve the state
1658  * unless we encounter errors.
1659  *
1660  * Typically this function may be invoked from a system suspend callback to
1661  * make sure the device is put into a low-power state.  It should only be used
1662  * during system-wide PM transitions to sleep states, and it assumes that the
1663  * analogous pm_runtime_force_resume() will be used to resume the device.
1664  */
1665 int pm_runtime_force_suspend(struct device *dev)
1666 {
1667         int (*callback)(struct device *);
1668         int ret;
1669
1670         pm_runtime_disable(dev);
1671         if (pm_runtime_status_suspended(dev))
1672                 return 0;
1673
1674         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1675
1676         ret = callback ? callback(dev) : 0;
1677         if (ret)
1678                 goto err;
1679
1680         /*
1681          * If the device can stay in suspend after the system-wide transition
1682          * to the working state that will follow, drop the children counter of
1683          * its parent, but set its status to RPM_SUSPENDED anyway in case this
1684          * function is called again for it in the meantime.
1685          */
1686         if (pm_runtime_need_not_resume(dev))
1687                 pm_runtime_set_suspended(dev);
1688         else
1689                 __update_runtime_status(dev, RPM_SUSPENDED);
1690
1691         return 0;
1692
1693 err:
1694         pm_runtime_enable(dev);
1695         return ret;
1696 }
1697 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1698
1699 /**
1700  * pm_runtime_force_resume - Force a device into resume state if needed.
1701  * @dev: Device to resume.
1702  *
1703  * Prior to invoking this function we expect the user to have brought the
1704  * device into a low-power state by a call to pm_runtime_force_suspend().  Here
1705  * we reverse those actions and bring the device back to full power, if it is
1706  * expected to be used on system resume.  Otherwise, the resume is deferred and
1707  * left to be managed via runtime PM.
1708  *
1709  * Typically this function may be invoked from a system resume callback.
1710  */
1711 int pm_runtime_force_resume(struct device *dev)
1712 {
1713         int (*callback)(struct device *);
1714         int ret = 0;
1715
1716         if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1717                 goto out;
1718
1719         /*
1720          * The value of the parent's children counter is correct already, so
1721          * just update the status of the device.
1722          */
1723         __update_runtime_status(dev, RPM_ACTIVE);
1724
1725         callback = RPM_GET_CALLBACK(dev, runtime_resume);
1726
1727         ret = callback ? callback(dev) : 0;
1728         if (ret) {
1729                 pm_runtime_set_suspended(dev);
1730                 goto out;
1731         }
1732
1733         pm_runtime_mark_last_busy(dev);
1734 out:
1735         pm_runtime_enable(dev);
1736         return ret;
1737 }
1738 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
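
/*
 * Illustrative usage sketch (not part of this file): drivers that want their
 * runtime PM callbacks reused for system-wide sleep transitions commonly
 * plug the two helpers above directly into their dev_pm_ops:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 *
 * foo_pm_ops and the foo_runtime_* callbacks are assumptions made for the
 * example; SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() are the
 * standard macros declared in include/linux/pm.h.
 */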