/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

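/*
 * Illustrative sketch (not part of this file): for a device with no PM
 * domain and no type/class/bus PM ops, RPM_GET_CALLBACK() falls through
 * the layers above and picks the driver's own callback.  The names
 * foo_runtime_suspend/foo_runtime_resume below are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};
 *
 * For such a device, RPM_GET_CALLBACK(dev, runtime_suspend) evaluates to
 * foo_runtime_suspend.
 */
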
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	unsigned long flags, time;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = dev->power.suspended_jiffies;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return jiffies_to_nsecs(time);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 last_busy, expires = 0;
	u64 now = ktime_to_ns(ktime_get());

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = READ_ONCE(dev->power.last_busy);

	expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires <= now)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

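/*
 * Worked example (hypothetical numbers): with power.autosuspend_delay set
 * to 2000 ms, the expiration computed above is
 * last_busy + 2000 * NSEC_PER_MSEC.  If that instant is still in the
 * future it is returned as-is; if it has already passed, 0 is returned so
 * that callers such as rpm_suspend() suspend right away instead of
 * rescheduling the timer.
 */
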
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers, for solving the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or the
 *     block device itself), a deadlock may be triggered inside the
 *     memory allocation, since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  This
 *     situation was first pointed out by Alan Stern.  Network devices
 *     are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * There is no need to enable the ancestors any more if the
		 * device has been enabled already.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if all of its
		 * children don't set the flag, because the ancestors'
		 * flag may have been set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

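/*
 * Usage sketch (hypothetical driver, not part of this file): a block or
 * network driver would typically set the flag once the device is known to
 * sit in an I/O path, e.g. from its probe routine after device_add():
 *
 *	pm_runtime_set_memalloc_noio(&pdev->dev, true);
 *
 * and clear it again with pm_runtime_set_memalloc_noio(&pdev->dev, false)
 * before device_del() runs.
 */
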
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
		    link->rpm_active)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		link->rpm_active = true;
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->rpm_active &&
		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
			pm_runtime_put(link->supplier);
			link->rpm_active = false;
		}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend() with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be thought of as part of an iSCSI block
		 * device, so the network device and its ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry the resume out; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
					dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						ns_to_ktime(expires),
						slack,
						HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	ktime_t expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_add(ktime_get(), ms_to_ktime(delay));
	dev->power.timer_expires = ktime_to_ns(expires);
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

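/*
 * Usage sketch (hypothetical, not part of this file): a driver that knows
 * its device will stay idle for a while can request a delayed suspend
 * instead of an immediate one:
 *
 *	ret = pm_schedule_suspend(dev, 5000);	// suspend in ~5 s
 *
 * A negative return means the request was not queued, e.g. -EAGAIN when
 * the usage count is still positive (see rpm_check_suspend_allowed()).
 */
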
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

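/*
 * Usage sketch (hypothetical driver, not part of this file): the static
 * inline wrappers in <linux/pm_runtime.h> map onto the three entry points
 * above; a typical synchronous I/O path looks like:
 *
 *	ret = pm_runtime_get_sync(dev);	// __pm_runtime_resume(dev, RPM_GET_PUT)
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);
 *		return ret;
 *	}
 *	// ... talk to the hardware ...
 *	pm_runtime_put(dev);	// __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 */
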
/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);

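/*
 * Usage sketch (hypothetical, not part of this file): this helper suits
 * paths that may run concurrently with suspend and must not wake the
 * device up, e.g. a periodic polling routine:
 *
 *	if (pm_runtime_get_if_in_use(dev) <= 0)
 *		return;		// device suspended or runtime PM disabled
 *	// ... access registers ...
 *	pm_runtime_put(dev);
 */
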
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * nonzero, the status may be changed either to RPM_ACTIVE or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

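/*
 * Usage sketch (hypothetical, not part of this file): drivers normally go
 * through the pm_runtime_set_active()/pm_runtime_set_suspended() wrappers
 * from <linux/pm_runtime.h> to tell the core what state the hardware is in
 * before enabling runtime PM, e.g. in probe for a device that powers up
 * active:
 *
 *	pm_runtime_set_active(dev);	// __pm_runtime_set_status(dev, RPM_ACTIVE)
 *	pm_runtime_enable(dev);
 */
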
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

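/*
 * Note: these two helpers back the per-device power/control sysfs
 * attribute ("on" maps to pm_runtime_forbid(), "auto" to
 * pm_runtime_allow()), so user space can override a driver's runtime PM
 * policy, e.g. (illustrative shell, path abbreviated):
 *
 *	echo auto > /sys/devices/.../power/control
 */
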
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

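/*
 * Usage sketch (hypothetical, not part of this file): after a driver has
 * called pm_runtime_irq_safe(dev) in probe, it may make synchronous
 * runtime PM calls in atomic context, e.g. from an interrupt handler:
 *
 *	spin_lock(&foo->lock);
 *	pm_runtime_get_sync(dev);	// callbacks run without sleeping
 *	// ... handle the event ...
 *	pm_runtime_put(dev);
 *	spin_unlock(&foo->lock);
 */
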
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

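/*
 * Usage sketch (hypothetical driver, not part of this file): the typical
 * probe-time autosuspend setup combines these helpers, and the I/O path
 * then uses the _autosuspend flavor of the put operations:
 *
 *	// probe:
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// 2 s of inactivity
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 *	// after each I/O burst:
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
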
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (as many times as needed).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->rpm_active) {
			pm_runtime_put_noidle(dev);
			link->rpm_active = false;
		}
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_get_sync(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_put(link->supplier);

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

void pm_runtime_drop_link(struct device *dev)
{
	rpm_put_suppliers(dev);

	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into a low-power state; it should only be used during
 * system-wide PM transitions to sleep states.  It assumes that the analogous
 * pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

 err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into a low-power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device into full power, if it is
 * expected to be used on system resume.  In the other case, we defer the
 * resume to be managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
 out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
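
/*
 * Usage sketch (hypothetical driver, not part of this file): drivers whose
 * runtime PM callbacks already do everything needed for system sleep can
 * reuse them directly for the system-wide transitions:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};
 */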