// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/rculist.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
        pm_callback_t cb;
        const struct dev_pm_ops *ops;

        if (dev->pm_domain)
                ops = &dev->pm_domain->ops;
        else if (dev->type && dev->type->pm)
                ops = dev->type->pm;
        else if (dev->class && dev->class->pm)
                ops = dev->class->pm;
        else if (dev->bus && dev->bus->pm)
                ops = dev->bus->pm;
        else
                ops = NULL;

        if (ops)
                cb = *(pm_callback_t *)((void *)ops + cb_offset);
        else
                cb = NULL;

        if (!cb && dev->driver && dev->driver->pm)
                cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

        return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
                __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
        u64 now, last, delta;

        if (dev->power.disable_depth > 0)
                return;

        last = dev->power.accounting_timestamp;

        now = ktime_get_mono_fast_ns();
        dev->power.accounting_timestamp = now;

        /*
         * Because ktime_get_mono_fast_ns() is not monotonic during
         * timekeeping updates, ensure that 'now' is after the last saved
         * timestamp.
         */
        if (now < last)
                return;

        delta = now - last;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_time += delta;
        else
                dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
        u64 time;
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        update_pm_runtime_accounting(dev);
        time = suspended ? dev->power.suspended_time : dev->power.active_time;

        spin_unlock_irqrestore(&dev->power.lock, flags);

        return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
        return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
        return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
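
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): the accounted times feed the runtime_active_time and
 * runtime_suspended_time sysfs attributes.  A driver-side debugging helper
 * could read the exported accessor like this:
 *
 *      static void foo_report_pm_time(struct device *dev)
 *      {
 *              dev_info(dev, "suspended for %llu ns so far\n",
 *                       pm_runtime_suspended_time(dev));
 *      }
 *
 * "foo_report_pm_time" is an assumed name used for illustration only.
 */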

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                hrtimer_try_to_cancel(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        u64 expires;

        if (!dev->power.use_autosuspend)
                return 0;

        autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                return 0;

        expires  = READ_ONCE(dev->power.last_busy);
        expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
        if (expires > ktime_get_mono_fast_ns())
                return expires; /* Expires in the future */

        return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
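
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): the expiration time computed above is driven by drivers updating
 * power.last_busy on activity.  A typical I/O completion path does:
 *
 *      static void foo_io_done(struct device *dev)
 *      {
 *              pm_runtime_mark_last_busy(dev);
 *              pm_runtime_put_autosuspend(dev);
 *      }
 *
 * "foo_io_done" is an assumed name used for illustration only.
 */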

static int dev_memalloc_noio(struct device *dev, void *data)
{
        return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of its ancestors (or the
 * block device itself), a deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  This
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI-style setups.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
        static DEFINE_MUTEX(dev_hotplug_mutex);

        mutex_lock(&dev_hotplug_mutex);
        for (;;) {
                bool enabled;

                /* hold power lock since bitfield is not SMP-safe. */
                spin_lock_irq(&dev->power.lock);
                enabled = dev->power.memalloc_noio;
                dev->power.memalloc_noio = enable;
                spin_unlock_irq(&dev->power.lock);

                /*
                 * No need to enable ancestors any more if the device
                 * has been enabled already.
                 */
                if (enabled && enable)
                        break;

                dev = dev->parent;

                /*
                 * Clear the flag of the parent device only if all of its
                 * children don't set the flag, because the ancestor's
                 * flag was set by any one of the descendants.
                 */
                if (!dev || (!enable &&
                    device_for_each_child(dev, NULL, dev_memalloc_noio)))
                        break;
        }
        mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
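
/*
 * Illustrative usage sketch (hypothetical, not part of this file): a block
 * device driver would set the flag right after device_add() and clear it
 * before device_del(), e.g.:
 *
 *      ret = device_add(&foo->dev);
 *      if (ret)
 *              return ret;
 *      pm_runtime_set_memalloc_noio(&foo->dev, true);
 *      ...
 *      pm_runtime_set_memalloc_noio(&foo->dev, false);
 *      device_del(&foo->dev);
 *
 * "foo" is an assumed name used for illustration only.
 */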

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count))
                retval = -EAGAIN;
        else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume &&
            dev->power.runtime_status == RPM_SUSPENDING) ||
            (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (__dev_pm_qos_resume_latency(dev) == 0)
                retval = -EPERM;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
        struct device_link *link;

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held()) {
                int retval;

                if (!(link->flags & DL_FLAG_PM_RUNTIME))
                        continue;

                retval = pm_runtime_get_sync(link->supplier);
                /* Ignore suppliers with disabled runtime PM. */
                if (retval < 0 && retval != -EACCES) {
                        pm_runtime_put_noidle(link->supplier);
                        return retval;
                }
                refcount_inc(&link->rpm_active);
        }
        return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
void pm_runtime_release_supplier(struct device_link *link)
{
        struct device *supplier = link->supplier;

        /*
         * The additional power.usage_count check is a safety net in case
         * the rpm_active refcount becomes saturated, in which case
         * refcount_dec_not_one() would return true forever, but it is not
         * strictly necessary.
         */
        while (refcount_dec_not_one(&link->rpm_active) &&
               atomic_read(&supplier->power.usage_count) > 0)
                pm_runtime_put_noidle(supplier);
}

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
        struct device_link *link;

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held()) {
                pm_runtime_release_supplier(link);
                if (try_to_suspend)
                        pm_request_idle(link->supplier);
        }
}

static void rpm_put_suppliers(struct device *dev)
{
        __rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
        struct device_link *link;
        int idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                pm_request_idle(link->supplier);

        device_links_read_unlock(idx);
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval = 0, idx;
        bool use_links = dev->power.links_count > 0;

        if (dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);
        } else {
                spin_unlock_irq(&dev->power.lock);

                /*
                 * Resume suppliers if necessary.
                 *
                 * The device's runtime PM status cannot change until this
                 * routine returns, so it is safe to read the status outside of
                 * the lock.
                 */
                if (use_links && dev->power.runtime_status == RPM_RESUMING) {
                        idx = device_links_read_lock();

                        retval = rpm_get_suppliers(dev);
                        if (retval) {
                                rpm_put_suppliers(dev);
                                goto fail;
                        }

                        device_links_read_unlock(idx);
                }
        }

        if (cb)
                retval = cb(dev);

        if (dev->power.irq_safe) {
                spin_lock(&dev->power.lock);
        } else {
                /*
                 * If the device is suspending and the callback has returned
                 * success, drop the usage counters of the suppliers that have
                 * been reference counted on its resume.
                 *
                 * Do that if resume fails too.
                 */
                if (use_links &&
                    ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
                    (dev->power.runtime_status == RPM_RESUMING && retval))) {
                        idx = device_links_read_lock();

                        __rpm_put_suppliers(dev, false);

fail:
                        device_links_read_unlock(idx);
                }

                spin_lock_irq(&dev->power.lock);
        }

        return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (dev->power.memalloc_noio) {
                unsigned int noio_flag;

                /*
                 * A deadlock might occur if memory allocation with
                 * GFP_KERNEL happens inside the runtime_suspend or
                 * runtime_resume callback of a block device's
                 * ancestor or of the block device itself.  A network
                 * device may be regarded as part of an iSCSI block
                 * device, so a network device and its ancestors should
                 * be marked memalloc_noio too.
                 */
                noio_flag = memalloc_noio_save();
                retval = __rpm_callback(cb, dev);
                memalloc_noio_restore(noio_flag);
        } else {
                retval = __rpm_callback(cb, dev);
        }

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;

        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        callback = RPM_GET_CALLBACK(dev, runtime_idle);

        /* If no callback assume success. */
        if (!callback || dev->power.no_callbacks)
                goto out;

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                trace_rpm_return_int(dev, _THIS_IP_, 0);
                return 0;
        }

        dev->power.idle_notification = true;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = callback(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
        return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
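
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a ->runtime_idle() callback invoked from rpm_idle() above can veto
 * the automatic rpm_suspend() that follows by returning a nonzero value, or
 * return 0 to let the autosuspend proceed:
 *
 *      static int foo_runtime_idle(struct device *dev)
 *      {
 *              struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *              return foo_hw_busy(priv) ? -EBUSY : 0;
 *      }
 *
 * "foo_priv" and "foo_hw_busy" are assumed names used for illustration only.
 */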

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeded and a deferred resume was requested while
 * the callback was running, then carry it out; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

repeat:
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                goto out;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;

        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
                u64 expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires &&
                            dev->power.timer_expires <= expires)) {
                                /*
                                 * We add a slack of 25% to gather wakeups
                                 * without sacrificing the granularity.
                                 */
                                u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
                                                    (NSEC_PER_MSEC >> 2);

                                dev->power.timer_expires = expires;
                                hrtimer_start_range_ns(&dev->power.suspend_timer,
                                                       ns_to_ktime(expires),
                                                       slack,
                                                       HRTIMER_MODE_ABS);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        callback = RPM_GET_CALLBACK(dev, runtime_suspend);

        dev_pm_enable_wake_irq_check(dev, true);
        retval = rpm_callback(callback, dev);
        if (retval)
                goto fail;

        dev_pm_enable_wake_irq_complete(dev);

no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                dev->power.deferred_resume = false;
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        if (dev->power.irq_safe)
                goto out;

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }
        /* Maybe the suppliers are now able to suspend. */
        if (dev->power.links_count > 0) {
                spin_unlock_irq(&dev->power.lock);

                rpm_suspend_suppliers(dev);

                spin_lock_irq(&dev->power.lock);
        }

out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;

fail:
        dev_pm_disable_wake_irq_check(dev, true);
        __update_runtime_status(dev, RPM_ACTIVE);
        dev->power.deferred_resume = false;
        wake_up_all(&dev->power.wait_queue);

        if (retval == -EAGAIN || retval == -EBUSY) {
                dev->power.runtime_error = 0;

                /*
                 * If the callback routine failed an autosuspend, and
                 * if the last_busy time has been updated so that there
                 * is a new autosuspend expiration time, automatically
                 * reschedule another autosuspend.
                 */
                if ((rpmflags & RPM_AUTO) &&
                    pm_runtime_autosuspend_expiration(dev) != 0)
                        goto repeat;
        } else {
                pm_runtime_cancel_pending(dev);
        }
        goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume(dev, rpmflags);

repeat:
        if (dev->power.runtime_error) {
                retval = -EINVAL;
        } else if (dev->power.disable_depth > 0) {
                if (dev->power.runtime_status == RPM_ACTIVE &&
                    dev->power.last_status == RPM_ACTIVE)
                        retval = 1;
                else
                        retval = -EACCES;
        }
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING ||
            dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING) {
                                dev->power.deferred_resume = true;
                                if (rpmflags & RPM_NOWAIT)
                                        retval = -EINPROGRESS;
                        } else {
                                retval = -EINPROGRESS;
                        }
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING &&
                            dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0 ||
                    dev->parent->power.ignore_children ||
                    dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        retval = 1;
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;

                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * Resume the parent if it has runtime PM enabled and has not
                 * been set to ignore its children.
                 */
                if (!parent->power.disable_depth &&
                    !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;

                goto repeat;
        }
skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        callback = RPM_GET_CALLBACK(dev, runtime_resume);

        dev_pm_disable_wake_irq_check(dev, false);
        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
                dev_pm_enable_wake_irq_check(dev, false);
        } else {
no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                pm_runtime_mark_last_busy(dev);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (retval >= 0)
                rpm_idle(dev, RPM_ASYNC);

out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
        struct device *dev = container_of(timer, struct device, power.suspend_timer);
        unsigned long flags;
        u64 expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /*
         * If 'expires' is after the current time, we've been called
         * too early.
         */
        if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);

        return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        u64 expires;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
        dev->power.timer_expires = expires;
        dev->power.timer_autosuspends = 0;
        hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
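
/*
 * Illustrative usage sketch (hypothetical, not part of this file): a driver
 * that wants the device suspended roughly half a second after its last use,
 * without the autosuspend machinery, could do (the delay value is an
 * assumption for illustration only):
 *
 *      pm_schedule_suspend(dev, 500);  (delay in milliseconds)
 */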

static int rpm_drop_usage_count(struct device *dev)
{
        int ret;

        ret = atomic_sub_return(1, &dev->power.usage_count);
        if (ret >= 0)
                return ret;

        /*
         * Because rpm_resume() does not check the usage counter, it will resume
         * the device even if the usage counter is 0 or negative, so it is
         * sufficient to increment the usage counter here to reverse the change
         * made above.
         */
        atomic_inc(&dev->power.usage_count);
        dev_warn(dev, "Runtime PM usage count underflow!\n");
        return -EINVAL;
}

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                retval = rpm_drop_usage_count(dev);
                if (retval < 0) {
                        return retval;
                } else if (retval > 0) {
                        trace_rpm_usage(dev, rpmflags);
                        return 0;
                }
        }

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                retval = rpm_drop_usage_count(dev);
                if (retval < 0) {
                        return retval;
                } else if (retval > 0) {
                        trace_rpm_usage(dev, rpmflags);
                        return 0;
                }
        }

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
                        dev->power.runtime_status != RPM_ACTIVE);

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
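
/*
 * For reference: the static inline helpers in include/linux/pm_runtime.h
 * funnel into the three entry points above with different flag combinations,
 * e.g. (as defined at the time of writing):
 *
 *      pm_runtime_get_sync(dev) == __pm_runtime_resume(dev, RPM_GET_PUT);
 *      pm_runtime_get(dev) == __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
 *      pm_runtime_put_sync(dev) == __pm_runtime_idle(dev, RPM_GET_PUT);
 *      pm_runtime_put(dev) == __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 *      pm_runtime_put_autosuspend(dev) ==
 *              __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
 */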

/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1.  Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);
        if (dev->power.disable_depth > 0) {
                retval = -EINVAL;
        } else if (dev->power.runtime_status != RPM_ACTIVE) {
                retval = 0;
        } else if (ign_usage_count) {
                retval = 1;
                atomic_inc(&dev->power.usage_count);
        } else {
                retval = atomic_inc_not_zero(&dev->power.usage_count);
        }
        trace_rpm_usage(dev, 0);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
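
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): pm_runtime_get_if_active() suits code paths that should only touch
 * the hardware when it is already powered up, e.g. a periodic statistics
 * poll:
 *
 *      static void foo_poll_stats(struct device *dev)
 *      {
 *              if (pm_runtime_get_if_active(dev, true) <= 0)
 *                      return;         (not active or PM disabled: skip)
 *
 *              foo_read_counters(dev);
 *              pm_runtime_put(dev);
 *      }
 *
 * "foo_poll_stats" and "foo_read_counters" are assumed names used for
 * illustration only.
 */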

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        bool notify_parent = false;
        unsigned long flags;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        /*
         * Prevent PM-runtime from being enabled for the device or return an
         * error if it is enabled already and working.
         */
        if (dev->power.runtime_error || dev->power.disable_depth)
                dev->power.disable_depth++;
        else
                error = -EAGAIN;

        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (error)
                return error;

        /*
         * If the new status is RPM_ACTIVE, the suppliers can be activated
         * upfront regardless of the current status, because next time
         * rpm_put_suppliers() runs, the rpm_active refcounts of the links
         * involved will be dropped down to one anyway.
         */
        if (status == RPM_ACTIVE) {
                int idx = device_links_read_lock();

                error = rpm_get_suppliers(dev);
                if (error)
                        status = RPM_SUSPENDED;

                device_links_read_unlock(idx);
        }

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.runtime_status == status || !parent)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                atomic_add_unless(&parent->power.child_count, -1, 0);
                notify_parent = !parent->power.ignore_children;
        } else {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth &&
                    !parent->power.ignore_children &&
                    parent->power.runtime_status != RPM_ACTIVE) {
                        dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
                                dev_name(dev),
                                dev_name(parent));
                        error = -EBUSY;
                } else if (dev->power.runtime_status == RPM_SUSPENDED) {
                        atomic_inc(&parent->power.child_count);
                }

                spin_unlock(&parent->power.lock);

                if (error) {
                        status = RPM_SUSPENDED;
                        goto out;
                }
        }

out_set:
        __update_runtime_status(dev, status);
        if (!error)
                dev->power.runtime_error = 0;

out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        if (status == RPM_SUSPENDED) {
                int idx = device_links_read_lock();

                rpm_put_suppliers(dev);

                device_links_read_unlock(idx);
        }

        pm_runtime_enable(dev);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
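
/*
 * Illustrative usage sketch (hypothetical, not part of this file): a probe
 * routine whose hardware comes up already powered typically synchronizes the
 * runtime PM status before enabling runtime PM, via the
 * pm_runtime_set_active() wrapper around this function:
 *
 *      pm_runtime_set_active(dev);
 *      pm_runtime_enable(dev);
 */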

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING ||
            dev->power.runtime_status == RPM_RESUMING ||
            dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending &&
            dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        /* Update time accounting before disabling PM-runtime. */
        update_pm_runtime_accounting(dev);

        if (!dev->power.disable_depth++) {
                __pm_runtime_barrier(dev);
                dev->power.last_status = dev->power.runtime_status;
        }

out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.disable_depth) {
                dev_warn(dev, "Unbalanced %s!\n", __func__);
                goto out;
        }

        if (--dev->power.disable_depth > 0)
                goto out;

        dev->power.last_status = RPM_INVALID;
        dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

        if (dev->power.runtime_status == RPM_SUSPENDED &&
            !dev->power.ignore_children &&
            atomic_read(&dev->power.child_count) > 0)
                dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");

out:
        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

static void pm_runtime_disable_action(void *data)
{
        pm_runtime_dont_use_autosuspend(data);
        pm_runtime_disable(data);
}

/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_enable(struct device *dev)
{
        pm_runtime_enable(dev);

        return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
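
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a probe routine combining autosuspend with the devres-managed
 * enable; the delay value is an assumption for illustration only:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct device *dev = &pdev->dev;
 *
 *              pm_runtime_set_autosuspend_delay(dev, 1000);
 *              pm_runtime_use_autosuspend(dev);
 *
 *              (the devres action registered here disables runtime PM and
 *               undoes the autosuspend setup on driver unbind)
 *              return devm_pm_runtime_enable(dev);
 *      }
 */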

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        int ret;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        ret = rpm_drop_usage_count(dev);
        if (ret == 0)
                rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
        else if (ret > 0)
                trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);

out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
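
/*
 * For reference: pm_runtime_forbid() and pm_runtime_allow() back the
 * "on"/"auto" values of the power/control sysfs attribute.  A driver that
 * wants runtime PM to be opt-in from user space (as USB does) can call
 * pm_runtime_forbid() right after enabling runtime PM:
 *
 *      pm_runtime_enable(dev);
 *      pm_runtime_forbid(dev); (user space writes "auto" to allow)
 */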

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);

        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
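
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): once pm_runtime_irq_safe(dev) has been called, synchronous helpers
 * such as pm_runtime_get_sync() become legal in atomic context, e.g. in an
 * interrupt handler:
 *
 *      static irqreturn_t foo_irq(int irq, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              pm_runtime_get_sync(dev);       (busy-waits instead of sleeping)
 *              foo_handle_event(dev);
 *              pm_runtime_put(dev);
 *              return IRQ_HANDLED;
 *      }
 *
 * "foo_irq" and "foo_handle_event" are assumed names used for illustration
 * only.
 */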

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                } else {
                        trace_rpm_usage(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.last_status = RPM_INVALID;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.needs_force_resume = 0;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        dev->power.suspend_timer.function = pm_suspend_timer_fn;

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
        if (!pm_runtime_enabled(dev)) {
                if (dev->power.runtime_status == RPM_ACTIVE)
                        pm_runtime_set_suspended(dev);
                if (dev->power.irq_safe) {
                        spin_lock_irq(&dev->power.lock);
                        dev->power.irq_safe = 0;
                        spin_unlock_irq(&dev->power.lock);
                        if (dev->parent)
                                pm_runtime_put(dev->parent);
                }
        }
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);
        pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                if (link->flags & DL_FLAG_PM_RUNTIME) {
                        link->supplier_preactivated = true;
                        pm_runtime_get_sync(link->supplier);
                }

        device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                if (link->supplier_preactivated) {
                        link->supplier_preactivated = false;
                        pm_runtime_put(link->supplier);
                }

        device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.links_count++;
        spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        WARN_ON(dev->power.links_count == 0);
        dev->power.links_count--;
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM-runtime references to it from the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
        if (!(link->flags & DL_FLAG_PM_RUNTIME))
                return;

        pm_runtime_drop_link_count(link->consumer);
        pm_runtime_release_supplier(link);
        pm_request_idle(link->supplier);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
        return atomic_read(&dev->power.usage_count) <= 1 &&
                (atomic_read(&dev->power.child_count) == 0 ||
                 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into a low-power state and it should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 *
 * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
 * state where this function has called the ->runtime_suspend callback but the
 * PM core marks the driver as runtime active.
 */
int pm_runtime_force_suspend(struct device *dev)
{
        int (*callback)(struct device *);
        int ret;

        pm_runtime_disable(dev);
        if (pm_runtime_status_suspended(dev))
                return 0;

        callback = RPM_GET_CALLBACK(dev, runtime_suspend);

        dev_pm_enable_wake_irq_check(dev, true);
        ret = callback ? callback(dev) : 0;
        if (ret)
                goto err;

        dev_pm_enable_wake_irq_complete(dev);

        /*
         * If the device can stay in suspend after the system-wide transition
         * to the working state that will follow, drop the children counter of
         * its parent, but set its status to RPM_SUSPENDED anyway in case this
         * function will be called again for it in the meantime.
         */
        if (pm_runtime_need_not_resume(dev)) {
                pm_runtime_set_suspended(dev);
        } else {
                __update_runtime_status(dev, RPM_SUSPENDED);
                dev->power.needs_force_resume = 1;
        }

        return 0;

err:
        dev_pm_disable_wake_irq_check(dev, true);
        pm_runtime_enable(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Before invoking this function we expect the user to have brought the device
 * into low-power state by a call to pm_runtime_force_suspend().  Here we
 * reverse those actions and bring the device back to full power, if it is
 * expected to be used on system resume.  Otherwise, we defer the resume to be
 * managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
        int (*callback)(struct device *);
        int ret = 0;

        if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
                goto out;

        /*
         * The value of the parent's children counter is correct already, so
         * just update the status of the device.
         */
        __update_runtime_status(dev, RPM_ACTIVE);

        callback = RPM_GET_CALLBACK(dev, runtime_resume);

        dev_pm_disable_wake_irq_check(dev, false);
        ret = callback ? callback(dev) : 0;
        if (ret) {
                pm_runtime_set_suspended(dev);
                dev_pm_enable_wake_irq_check(dev, false);
                goto out;
        }

        pm_runtime_mark_last_busy(dev);

out:
        dev->power.needs_force_resume = 0;
        pm_runtime_enable(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
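
/*
 * Illustrative usage sketch (hypothetical, not part of this file): drivers
 * without special system-sleep needs can reuse the two helpers above
 * directly as their system sleep callbacks; foo_runtime_suspend() and
 * foo_runtime_resume() are assumed driver callbacks:
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *                                      pm_runtime_force_resume)
 *              SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *                                 NULL)
 *      };
 */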