// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume. In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (e.g. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
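
/*
 * Illustrative sketch (not part of this file): pm_op() simply selects the
 * member of a dev_pm_ops that matches the current transition.  For a
 * hypothetical driver defining
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *		.freeze  = foo_freeze,
 *		.restore = foo_restore,
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESTORE) returns foo_restore; events with no
 * corresponding member yield NULL, which dpm_run_callback() treats as
 * "nothing to do".  All foo_* names above are made up.
 */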

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	pr_err("Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL, KERN_EMERG);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
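
/*
 * Illustrative sketch (not part of this file): callers bracket a potentially
 * slow callback with the watchdog, so a hung driver panics after
 * CONFIG_DPM_WATCHDOG_TIMEOUT seconds instead of blocking the transition
 * forever:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 *
 * This mirrors how device_resume() and __device_suspend() below use it.
 */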

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}
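
/*
 * Illustrative sketch (not part of this file): a hypothetical driver opts in
 * to leaving its device suspended across system resume by setting
 * DPM_FLAG_MAY_SKIP_RESUME at probe time:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 *
 * Whether the resume callbacks are actually skipped still depends on
 * power.must_resume, computed during the preceding suspend in
 * __device_suspend_noirq().  The foo_* names are made up.
 */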

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_skip_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule_dev(func, dev);
		return true;
	}

	return false;
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();

	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal.  Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
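
/*
 * Illustrative sketch (not part of this file): the "may skip resume"
 * condition above reduces to something like
 *
 *	bool keep_suspended =
 *		atomic_read(&dev->power.usage_count) <= 1 &&
 *		dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
 *		dev->power.may_skip_resume;
 *
 * i.e. the device may stay suspended across resume only if it was idle going
 * into system suspend, its driver opted in, and no middle layer vetoed it; in
 * every other case must_resume is set and propagated upwards to the parent
 * and the suppliers via dpm_superior_set_must_resume().
 */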

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	cpuidle_pause();

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (dev->power.wakeup_path && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);

	return 0;
}
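
/*
 * Illustrative sketch (not part of this file): a hypothetical subsystem
 * ->prepare() callback enabling the direct_complete optimization:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) ? 1 : 0;
 *	}
 *
 * A positive return means "leave me runtime-suspended if I already am".  If
 * every descendant answers the same way, __device_suspend() skips the whole
 * suspend/resume cycle for the device; DPM_FLAG_NO_DIRECT_COMPLETE vetoes
 * this.  The foo_* name is made up.
 */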

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before we
	 * disable further probing.  This sync point is important at least at
	 * boot time and with hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing a device during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit device probing here
	 * and defer probes instead.  The normal behavior will be restored in
	 * dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			pr_info("Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
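
/*
 * Illustrative sketch (not part of this file): a platform's sleep path is
 * expected to pair these entry points roughly as follows (error handling
 * omitted; see kernel/power/suspend.c for the real sequence):
 *
 *	dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	dpm_suspend_end(PMSG_SUSPEND);		// late + noirq
 *	// ... enter the sleep state ...
 *	dpm_resume_start(PMSG_RESUME);		// noirq + early
 *	dpm_resume_end(PMSG_RESUME);		// resume + complete
 */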

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
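
/*
 * Illustrative sketch (not part of this file): a hypothetical caller using
 * the iterator to log every device the PM core currently tracks:
 *
 *	static void foo_show_one(struct device *dev, void *data)
 *	{
 *		pr_info("PM: tracked device %s\n", dev_name(dev));
 *	}
 *
 *	dpm_for_each_dev(NULL, foo_show_one);
 *
 * The callback runs with dpm_list_mtx held, so it must not register or
 * unregister devices.  The foo_* name is made up.
 */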

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}

bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
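
/*
 * Illustrative sketch (not part of this file): DPM_FLAG_SMART_SUSPEND lets a
 * runtime-suspended device skip the late/noirq suspend callbacks above.  A
 * hypothetical driver opts in at probe time with
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * after which dev_pm_skip_suspend() returns true whenever the device is
 * still runtime-suspended by the time the system transition reaches it.
 */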