// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);
#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);

static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(rettime, calltime));
}
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
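/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * the foo_* callback names are hypothetical): a driver that needs work in
 * several suspend phases fills in the dev_pm_ops slots that the pm_op(),
 * pm_late_early_op() and pm_noirq_op() selectors above pick from:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 *
 * For PM_EVENT_SUSPEND the three selectors would then return foo_suspend,
 * foo_suspend_late and foo_suspend_noirq for the respective phases.
 */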
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL, KERN_EMERG);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
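/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * with CONFIG_DPM_WATCHDOG=y, the on-stack pattern used by device_resume()
 * and __device_suspend() below expands to roughly
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 *
 * so a callback that blocks for more than CONFIG_DPM_WATCHDOG_TIMEOUT
 * seconds ends in dpm_watchdog_handler() and panic().
 */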
/*------------------------- Resume routines -------------------------*/
/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}
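/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * foo_probe() is hypothetical): power.may_skip_resume and power.must_resume
 * only allow a resume to be skipped for drivers that opt in, typically at
 * probe time:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 *
 * Without DPM_FLAG_MAY_SKIP_RESUME, __device_suspend() below sets
 * power.must_resume and this function returns false during RESUME.
 */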
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_skip_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);

	if (error) {
		suspend_stats.failed_resume_noirq++;
		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
	}
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		dev->power.async_in_progress = true;

		get_device(dev);

		if (async_schedule_dev_nocall(func, dev))
			return true;

		put_device(dev);
	}
	/*
	 * Because async_schedule_dev_nocall() above has returned false or it
	 * has not been called at all, func() is not running and it is safe to
	 * update the async_in_progress flag without extra synchronization.
	 */
	dev->power.async_in_progress = false;
	return false;
}
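/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * dpm_async_fn() only schedules func() asynchronously for devices with
 * power.async_suspend set, which a driver or subsystem enables with the
 * device_enable_async_suspend() helper, usually before device_register():
 *
 *	device_enable_async_suspend(dev);
 *
 * All other devices are handled synchronously in the dpm_*() loops below.
 */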
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}
static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_noirq(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);

	if (error) {
		suspend_stats.failed_resume_early++;
		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async early" : " early", error);
	}
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_early(dev, pm_transition, true);
	put_device(dev);
}
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_early(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (error) {
		suspend_stats.failed_resume++;
		dpm_save_failed_step(SUSPEND_RESUME);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume(dev, pm_transition, true);
	put_device(dev);
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);

		get_device(dev);

		if (!dev->power.async_in_progress) {
			mutex_unlock(&dpm_list_mtx);

			device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
		}

		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	pm_runtime_put(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal.  Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}
static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_noirq_list);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (device_wakeup_path(dev) && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	wake_up_all_idle_cpus();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);

		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
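/*
 * Illustrative ordering sketch (editor's addition, not part of the original
 * file): the PM core, e.g. suspend_devices_and_enter() in
 * kernel/power/suspend.c, pairs the exported entry points roughly as
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	if (!error)
 *		error = dpm_suspend_end(PMSG_SUSPEND);	// late + noirq
 *	...
 *	dpm_resume_start(PMSG_RESUME);			// noirq + early
 *	dpm_resume_end(PMSG_RESUME);			// resume + complete
 */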
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_suspended_list);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(dev, callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
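/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * foo_prepare() is hypothetical): a ->prepare() callback that opts a
 * runtime-suspended device into the direct_complete path set up above:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * A positive return value tells the PM core the device may be left
 * runtime-suspended across the transition, provided all of its descendants
 * are treated the same way.
 */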
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disable probing of devices. This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices will happen during suspend or
	 * hibernation and system behavior will be unpredictable in this case.
	 * So, let's prohibit device's probing here and defer their probes
	 * instead. The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);

		if (!error) {
			dev->power.is_prepared = true;
			if (!list_empty(&dev->power.entry))
				list_move_tail(&dev->power.entry, &dpm_prepared_list);
		} else if (error == -EAGAIN) {
			error = 0;
		} else {
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
	if (ret)
		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
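/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * the foo_* names are hypothetical): a driver whose ordering constraint is
 * not captured by parent/child relations or device links can serialize
 * against another device explicitly from its own callback:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *		...
 *	}
 */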
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
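/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * foo_count_cb() is hypothetical):
 *
 *	static void foo_count_cb(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *
 *	dpm_for_each_dev(&count, foo_count_cb);
 */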
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}