// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

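/*
 * For instance (illustrative only), RPM_GET_CALLBACK(dev, runtime_suspend)
 * expands to:
 *
 *	__rpm_get_callback(dev, offsetof(struct dev_pm_ops, runtime_suspend))
 *
 * which picks the dev_pm_ops of the PM domain, device type, class or bus
 * (in that order) and falls back to the driver's dev_pm_ops if the
 * resulting callback pointer is NULL.
 */
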
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

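/*
 * Worked example (illustrative only): with power.autosuspend_delay set to
 * 2000 ms and power.last_busy == T ns, the helper above returns
 * T + 2000 * NSEC_PER_MSEC, i.e. an absolute CLOCK_MONOTONIC expiry two
 * seconds after the last recorded activity, or 0 once that instant has
 * already passed.
 */
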
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers to solve the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or the
 *     block device itself), a deadlock may be triggered inside the
 *     memory allocation, since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  This
 *     situation was first pointed out by Alan Stern.  Network devices
 *     are involved in iSCSI-type situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if all of its
		 * children don't set the flag, because the ancestors'
		 * flags were set by one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

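/*
 * Usage sketch (hypothetical driver, not part of this file): following the
 * rule stated above, a block or network driver flips the flag while its
 * device is registered:
 *
 *	device_add(dev);
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */
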
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count))
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
void pm_runtime_release_supplier(struct device_link *link)
{
	struct device *supplier = link->supplier;

	/*
	 * The additional power.usage_count check is a safety net in case
	 * the rpm_active refcount becomes saturated, in which case
	 * refcount_dec_not_one() would return true forever, but it is not
	 * strictly necessary.
	 */
	while (refcount_dec_not_one(&link->rpm_active) &&
	       atomic_read(&supplier->power.usage_count) > 0)
		pm_runtime_put_noidle(supplier);
}

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		pm_runtime_release_supplier(link);
		if (try_to_suspend)
			pm_request_idle(link->supplier);
	}
}

static void rpm_put_suppliers(struct device *dev)
{
	__rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_request_idle(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval) {
				rpm_put_suppliers(dev);
				goto fail;
			}

			device_links_read_unlock(idx);
		}
	}

	if (cb)
		retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			__rpm_put_suppliers(dev, false);

fail:
			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	/* If no callback assume success. */
	if (!callback || dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might occur if a GFP_KERNEL memory allocation
		 * happens inside the runtime_suspend or runtime_resume
		 * callbacks of a block device's ancestor or of the block
		 * device itself.  A network device may be regarded as part
		 * of an iSCSI block device, so network devices and their
		 * ancestors should be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out that resume; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						       ns_to_ktime(expires),
						       slack,
						       HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

	dev_pm_enable_wake_irq_complete(dev);

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (dev->power.irq_safe)
		goto out;

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}
	/* Maybe the suppliers are now able to suspend. */
	if (dev->power.links_count > 0) {
		spin_unlock_irq(&dev->power.lock);

		rpm_suspend_suppliers(dev);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev, true);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
	} else if (dev->power.disable_depth > 0) {
		if (dev->power.runtime_status == RPM_ACTIVE &&
		    dev->power.last_status == RPM_ACTIVE)
			retval = 1;
		else
			retval = -EACCES;
	}
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

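/*
 * For example (illustrative only), a driver that wants its device suspended
 * roughly 250 ms from now would call:
 *
 *	pm_schedule_suspend(dev, 250);
 *
 * The request is asynchronous: once the timer fires, pm_suspend_timer_fn()
 * above queues the actual suspend via rpm_suspend(RPM_ASYNC).
 */
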
static int rpm_drop_usage_count(struct device *dev)
{
	int ret;

	ret = atomic_sub_return(1, &dev->power.usage_count);
	if (ret >= 0)
		return ret;

	/*
	 * Because rpm_resume() does not check the usage counter, it will resume
	 * the device even if the usage counter is 0 or negative, so it is
	 * sufficient to increment the usage counter here to reverse the change
	 * made by atomic_sub_return() above.
	 */
	atomic_inc(&dev->power.usage_count);
	dev_warn(dev, "Runtime PM usage count underflow!\n");
	return -EINVAL;
}

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage_rcuidle(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage_rcuidle(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

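/*
 * The convenience wrappers in include/linux/pm_runtime.h funnel into the
 * three entry points above; a few common mappings (as of this writing, so
 * treat the exact flag combinations as informational):
 *
 *	pm_runtime_get_sync(dev)  == __pm_runtime_resume(dev, RPM_GET_PUT);
 *	pm_runtime_get(dev)       == __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
 *	pm_runtime_put(dev)       == __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 *	pm_runtime_put_sync(dev)  == __pm_runtime_idle(dev, RPM_GET_PUT);
 *	pm_runtime_put_autosuspend(dev)
 *				  == __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
 */
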
/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1.  Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev->power.disable_depth > 0) {
		retval = -EINVAL;
	} else if (dev->power.runtime_status != RPM_ACTIVE) {
		retval = 0;
	} else if (ign_usage_count) {
		retval = 1;
		atomic_inc(&dev->power.usage_count);
	} else {
		retval = atomic_inc_not_zero(&dev->power.usage_count);
	}
	trace_rpm_usage_rcuidle(dev, 0);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);

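/*
 * Usage sketch (hypothetical driver, not part of this file): an interrupt
 * handler that must not wake a suspended device can use the %false variant
 * (also available as the pm_runtime_get_if_in_use() wrapper):
 *
 *	if (pm_runtime_get_if_active(dev, false) <= 0)
 *		return IRQ_NONE;
 *
 *	...access the hardware...
 *
 *	pm_runtime_put_autosuspend(dev);
 */
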
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	unsigned long flags;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));

			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

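/*
 * Callers normally reach this through the wrappers in
 * include/linux/pm_runtime.h:
 *
 *	pm_runtime_set_active(dev)    == __pm_runtime_set_status(dev, RPM_ACTIVE);
 *	pm_runtime_set_suspended(dev) == __pm_runtime_set_status(dev, RPM_SUSPENDED);
 *
 * e.g. a probe path that powers the hardware up itself would call
 * pm_runtime_set_active() before pm_runtime_enable() so that the recorded
 * status matches the real state of the device.
 */
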
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++) {
		__pm_runtime_barrier(dev);
		dev->power.last_status = dev->power.runtime_status;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.disable_depth) {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
		goto out;
	}

	if (--dev->power.disable_depth > 0)
		goto out;

	dev->power.last_status = RPM_INVALID;
	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

	if (dev->power.runtime_status == RPM_SUSPENDED &&
	    !dev->power.ignore_children &&
	    atomic_read(&dev->power.child_count) > 0)
		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

static void pm_runtime_disable_action(void *data)
{
	pm_runtime_dont_use_autosuspend(data);
	pm_runtime_disable(data);
}

/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_enable(struct device *dev)
{
	pm_runtime_enable(dev);

	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);

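/*
 * Probe-path sketch (hypothetical driver, not part of this file): with the
 * devres variant the matching disable is issued automatically on driver
 * detach, so no explicit cleanup is needed:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_pm_runtime_enable(&pdev->dev);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */
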
/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	int ret;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	ret = rpm_drop_usage_count(dev);
	if (ret == 0)
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
	else if (ret > 0)
		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

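/*
 * pm_runtime_forbid() and pm_runtime_allow() back the per-device sysfs
 * "control" attribute; e.g. from user space:
 *
 *	echo on   > /sys/devices/.../power/control	(forbid)
 *	echo auto > /sys/devices/.../power/control	(allow)
 */
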
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		} else {
			trace_rpm_usage_rcuidle(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

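/*
 * Setup sketch (hypothetical driver, not part of this file): a probe routine
 * typically pairs this with pm_runtime_use_autosuspend() (the wrapper around
 * __pm_runtime_use_autosuspend() below) and later drops references with the
 * autosuspending put, e.g. with a 1000 ms idle window:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 1000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *	...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
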
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.last_status = RPM_INVALID;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = 0;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * runtime PM references to it held by the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!(link->flags & DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into a low-power state, and it should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev)) {
		pm_runtime_set_suspended(dev);
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		dev->power.needs_force_resume = 1;
	}

	return 0;

 err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

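/*
 * Typical pairing (hypothetical driver, not part of this file): reuse the
 * runtime PM callbacks for system-wide sleep by pointing the system sleep
 * ops at the two force helpers:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */
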
/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the device
 * into low power state by a call to pm_runtime_force_suspend().  Here we reverse
 * those actions and bring the device into full power, if it is expected to be
 * used on system resume.  Otherwise, we defer the resume to be managed via
 * runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
 out:
	dev->power.needs_force_resume = 0;
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);