/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = 0;

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state,
				  const char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled)
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
}
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
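/*
 * Example (illustrative sketch, not part of the original file): a driver
 * typically fills the dev_pm_ops slots that pm_op() consults through the
 * helper macro from <linux/pm.h>.  The foo_* names are hypothetical:
 *
 *	static int foo_suspend(struct device *dev) { ... return 0; }
 *	static int foo_resume(struct device *dev) { ... return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With this, pm_op() returns foo_suspend for PM_EVENT_SUSPEND (via
 * ->suspend) and foo_resume for PM_EVENT_RESUME (via ->resume).
 */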
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
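/*
 * Example (illustrative sketch, hypothetical foo_* callbacks): the late/early
 * and noirq slots consulted by the two helpers above are usually populated
 * with the companion macros from <linux/pm.h>, alongside the normal pair:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 */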
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
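/*
 * Usage pattern (a sketch of how the functions below employ the watchdog):
 * the watchdog brackets a potentially slow callback invocation, so a hung
 * callback panics and leaves a crash dump instead of wedging the transition
 * silently:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 */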
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
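/*
 * Example (illustrative sketch): a driver opts a device into the async path
 * checked by is_async() with device_enable_async_suspend(), usually at probe
 * time; foo_probe is hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		...
 *		return 0;
 *	}
 */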
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
void dpm_noirq_end(void)
{
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
}
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);
	dpm_noirq_end();
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}
void dpm_noirq_begin(void)
{
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
}
int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	dpm_noirq_begin();
	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
		dpm_clear_suppliers_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
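/*
 * Example (illustrative sketch, hypothetical foo_prepare): a driver whose
 * device can stay runtime-suspended across system suspend may return a
 * positive value from ->prepare() to request the direct_complete
 * optimization described above:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);	// 1: skip suspend/resume
 *	}
 */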
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disabling probing of devices.  This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe for probing of devices to happen during suspend or
	 * hibernation; system behavior would be unpredictable in that case.
	 * So let's prohibit device probing here and defer those probes
	 * instead.  The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
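/*
 * Ordering sketch (simplified; the real sequencing lives in the system sleep
 * core, e.g. kernel/power/suspend.c): a sleep transition pairs the entry
 * points in this file roughly as follows:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	if (!error)
 *		error = dpm_suspend_end(PMSG_SUSPEND);	// late + noirq
 *	// ...enter the sleep state, then wake up...
 *	dpm_resume_start(PMSG_RESUME);			// noirq + early
 *	dpm_resume_end(PMSG_RESUME);			// resume + complete
 */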
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
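/*
 * Example (illustrative sketch): a driver whose resume must not run before
 * another device has finished its own transition can wait for it explicitly.
 * Here "struct foo" and its "companion" device pointer are hypothetical:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *
 *		device_pm_wait_for_dev(dev, priv->companion);
 *		...
 *		return 0;
 *	}
 */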
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
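/*
 * Example (illustrative sketch): counting the devices currently on dpm_list
 * with dpm_for_each_dev(); foo_count_dev is hypothetical:
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, foo_count_dev);
 */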
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || (pm_ops_is_empty(dev->class->pm) &&
		 !dev->class->suspend && !dev->class->resume)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}