// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

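/* Like list_for_each_entry_rcu(), but lockdep-validated against the device links SRCU read lock. */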
#define list_for_each_entry_rcu_locked(pos, head, member) \
        list_for_each_entry_rcu(pos, head, member, \
                        device_links_read_lock_held())

/*
 * The entries in dpm_list are in depth-first order: children are
 * guaranteed to be discovered after their parents, and are inserted at
 * the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
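        /*
         * Leave the completion in the "complete" state so that dpm_wait()
         * does not block before the first PM transition reinitializes it
         * (see dpm_async_fn()).
         */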
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        /* Skip PM setup/initialization. */
        if (device_pm_not_required(dev))
                return;

        pr_debug("Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        device_pm_check_callbacks(dev);
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev->power.in_dpm_list = true;
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        if (device_pm_not_required(dev))
                return;

        pr_debug("Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        dev->power.in_dpm_list = false;
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
        device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
        if (!pm_print_times_enabled)
                return 0;

        dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
                 task_pid_nr(current),
                 dev->parent ? dev_name(dev->parent) : "none");
        return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  void *cb, int error)
{
        ktime_t rettime;
        s64 nsecs;

        if (!pm_print_times_enabled)
                return;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

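        /* ">> 10" cheaply approximates a division by NSEC_PER_USEC (1000). */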
        dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
                 (unsigned long long)nsecs >> 10);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /*
         * If the supplier goes away right after we've checked the link to it,
         * we'll wait for its completion to change the state, but that's fine,
         * because the only things that will block as a result are the SRCU
         * callbacks freeing the link objects for the links in the list we're
         * walking.
         */
        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->supplier, async);

        device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
        struct device *parent;

        /*
         * If the device is resumed asynchronously and the parent's callback
         * deletes both the device and the parent itself, the parent object may
         * be freed while this function is running, so avoid that by reference
         * counting the parent once more unless the device has been deleted
         * already (in which case return right away).
         */
        mutex_lock(&dpm_list_mtx);

        if (!device_pm_initialized(dev)) {
                mutex_unlock(&dpm_list_mtx);
                return false;
        }

        parent = get_device(dev->parent);

        mutex_unlock(&dpm_list_mtx);

        dpm_wait(parent, async);
        put_device(parent);

        dpm_wait_for_suppliers(dev, async);

        /*
         * If the parent's callback has deleted the device, attempting to resume
         * it would be invalid, so avoid doing that then.
         */
        return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /*
         * The status of a device link can only be changed from "dormant" by a
         * probe, but that cannot happen during system suspend/resume.  In
         * theory it can change to "dormant" at that time, but then it is
         * reasonable to wait for the target device anyway (e.g. if it goes
         * away, it's better to wait for it to go away completely and then
         * continue instead of trying to continue in parallel with its
         * unregistration).
         */
        list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->consumer, async);

        device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
        dpm_wait_for_children(dev, async);
        dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
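
/*
 * For reference, the table pm_op() indexes into is supplied by a driver,
 * subsystem or power domain.  A minimal sketch for a hypothetical "foo"
 * driver (names below are illustrative, not from this file):
 *
 *      static int foo_suspend(struct device *dev) { return 0; }
 *      static int foo_resume(struct device *dev) { return 0; }
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *      };
 *
 * SET_SYSTEM_SLEEP_PM_OPS() points .suspend/.freeze/.poweroff at the first
 * callback and .resume/.thaw/.restore at the second, so the switch above
 * finds a callback for every sleep event.
 */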

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
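
/*
 * The _late and _noirq members are likewise usually filled in with the
 * helper macros from <linux/pm.h>, e.g. for the hypothetical driver above:
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *              SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *      };
 */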

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
                        int error)
{
        pr_err("Device %s failed to %s%s: error %d\n",
               dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
                          const char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;

        pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
                  info ?: "", info ? " " : "", pm_verb(state.event),
                  error ? "aborted" : "complete",
                  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, const char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev, cb);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, cb, error);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
        struct dpm_watchdog *wd = from_timer(wd, t, timer);

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL, KERN_EMERG);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
        if (pm_transition.event == PM_EVENT_RESTORE)
                return false;

        if (pm_transition.event == PM_EVENT_THAW)
                return dev_pm_skip_suspend(dev);

        return !dev->power.must_resume;
}
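
/*
 * Drivers opt in to resume skipping with DPM_FLAG_MAY_SKIP_RESUME, e.g. from
 * probe (a hypothetical sketch; whether the resume is actually skipped also
 * depends on power.may_skip_resume and power.must_resume as computed during
 * the preceding suspend):
 *
 *      dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *                                   DPM_FLAG_MAY_SKIP_RESUME);
 */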

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        bool skip_resume;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_noirq_suspended)
                goto Out;

        if (!dpm_wait_for_superior(dev, async))
                goto Out;

        skip_resume = dev_pm_skip_resume(dev);
        /*
         * If the driver callback is skipped below or by the middle layer
         * callback and device_resume_early() also skips the driver callback for
         * this device later, it needs to appear as "suspended" to PM-runtime,
         * so change its status accordingly.
         *
         * Otherwise, the device is going to be resumed, so set its PM-runtime
         * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
         * to avoid confusing drivers that don't use it.
         */
        if (skip_resume)
                pm_runtime_set_suspended(dev);
        else if (dev_pm_skip_suspend(dev))
                pm_runtime_set_active(dev);

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }
        if (callback)
                goto Run;

        if (skip_resume)
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);

Skip:
        dev->power.is_noirq_suspended = false;

Out:
        complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule_dev(func, dev);
                return true;
        }

        return false;
}
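
/*
 * Note: a device only takes the async path if its power.async_suspend flag is
 * set, typically because the subsystem called device_enable_async_suspend()
 * at registration time, and if async PM has not been disabled globally.
 */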

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_noirq(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Kick off the async threads up front, so that their start is not
         * delayed by devices that resume synchronously.
         */
        list_for_each_entry(dev, &dpm_noirq_list, power.entry)
                dpm_async_fn(dev, async_resume_noirq);

        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_noirq(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_noirq++;
                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " noirq", error);
                        }
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, "noirq");
        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
        dpm_noirq_resume_devices(state);

        resume_device_irqs();
        device_wakeup_disarm_wake_irqs();

        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_late_suspended)
                goto Out;

        if (!dpm_wait_for_superior(dev, async))
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }
        if (callback)
                goto Run;

        if (dev_pm_skip_resume(dev))
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);

Skip:
        dev->power.is_late_suspended = false;

Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        complete_all(&dev->power.completion);
        return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_early(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Kick off the async threads up front, so that their start is not
         * delayed by devices that resume synchronously.
         */
        list_for_each_entry(dev, &dpm_late_early_list, power.entry)
                dpm_async_fn(dev, async_resume_early);

        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_early(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_early++;
                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " early", error);
                        }
                }
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, "early");
        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
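
/* dpm_resume_start() is the counterpart of dpm_suspend_end() below. */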

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                /* Match the pm_runtime_disable() in __device_suspend(). */
                pm_runtime_enable(dev);
                goto Complete;
        }

        if (!dpm_wait_for_superior(dev, async))
                goto Complete;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class && dev->class->pm) {
                info = "class ";
                callback = pm_op(dev->class->pm, state);
                goto Driver;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry)
                dpm_async_fn(dev, async_resume);

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, 0, NULL);

        cpufreq_resume();
        devfreq_resume();
        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        const char *info = NULL;

        if (dev->power.syscore)
                goto out;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

out:
        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                device_complete(dev, state);
                trace_device_pm_callback_end(dev, 0);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);

        /* Allow device probing and trigger re-probing of deferred devices */
        device_unblock_probing();
        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
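
/*
 * Taken together, resume runs in four phases; after platform wakeup the
 * suspend core calls, roughly:
 *
 *      dpm_resume_noirq(state);        - "noirq" callbacks, IRQs re-enabled
 *      dpm_resume_early(state);        - "early" callbacks
 *      dpm_resume_end(state);          - "resume" and "complete" callbacks
 */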

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}
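
/*
 * resume_event() is used to unwind a partially completed suspend, e.g.
 * dpm_resume_noirq(resume_event(state)) when the "noirq" suspend phase
 * fails below.
 */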

static void dpm_superior_set_must_resume(struct device *dev)
{
        struct device_link *link;
        int idx;

        if (dev->parent)
                dev->parent->power.must_resume = true;

        idx = device_links_read_lock();

        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
                link->supplier->power.must_resume = true;

        device_links_read_unlock(idx);
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }
        if (callback)
                goto Run;

        if (dev_pm_skip_suspend(dev))
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);
        if (error) {
                async_error = error;
                goto Complete;
        }

Skip:
        dev->power.is_noirq_suspended = true;

        /*
         * Skipping the resume of devices that were in use right before the
         * system suspend (as indicated by their PM-runtime usage counters)
         * would be suboptimal.  Also resume them if skipping their resume is
         * not allowed.
         */
        if (atomic_read(&dev->power.usage_count) > 1 ||
            !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
              dev->power.may_skip_resume))
                dev->power.must_resume = true;

        if (dev->power.must_resume)
                dpm_superior_set_must_resume(dev);

Complete:
        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_noirq(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend_noirq))
                return 0;

        return __device_suspend_noirq(dev, pm_transition, false);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;

        if (error) {
                suspend_stats.failed_suspend_noirq++;
                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
        }
        dpm_show_time(starttime, state, error, "noirq");
        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
        return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        int ret;

        cpuidle_pause();

        device_wakeup_arm_wake_irqs();
        suspend_device_irqs();

        ret = dpm_noirq_suspend_devices(state);
        if (ret)
                dpm_resume_noirq(resume_event(state));

        return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
        struct device *parent = dev->parent;

        if (!parent)
                return;

        spin_lock_irq(&parent->power.lock);

        if (dev->power.wakeup_path && !parent->power.ignore_children)
                parent->power.wakeup_path = true;

        spin_unlock_irq(&parent->power.lock);
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        __pm_runtime_disable(dev, false);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }
        if (callback)
                goto Run;

        if (dev_pm_skip_suspend(dev))
                goto Skip;

        if (dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

Run:
        error = dpm_run_callback(callback, dev, state, info);
        if (error) {
                async_error = error;
                goto Complete;
        }
        dpm_propagate_wakeup_to_parent(dev);

Skip:
        dev->power.is_late_suspended = true;

Complete:
        TRACE_SUSPEND(error);
        complete_all(&dev->power.completion);
        return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_late(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }
        put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
        if (dpm_async_fn(dev, async_suspend_late))
                return 0;

        return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev);

                mutex_lock(&dpm_list_mtx);
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);

                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend_late++;
                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
        }
        dpm_show_time(starttime, state, error, "late");
        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error;

        error = dpm_suspend_late(state);
        if (error)
                goto out;

        error = dpm_suspend_noirq(state);
        if (error)
                dpm_resume_early(resume_event(state));

out:
        dpm_show_time(starttime, state, error, "end");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
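
/* dpm_suspend_end() is undone by dpm_resume_start() above. */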
1536
1537 /**
1538  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1539  * @dev: Device to suspend.
1540  * @state: PM transition of the system being carried out.
1541  * @cb: Suspend callback to execute.
1542  * @info: string description of caller.
1543  */
1544 static int legacy_suspend(struct device *dev, pm_message_t state,
1545                           int (*cb)(struct device *dev, pm_message_t state),
1546                           const char *info)
1547 {
1548         int error;
1549         ktime_t calltime;
1550
1551         calltime = initcall_debug_start(dev, cb);
1552
1553         trace_device_pm_callback_start(dev, info, state.event);
1554         error = cb(dev, state);
1555         trace_device_pm_callback_end(dev, error);
1556         suspend_report_result(cb, error);
1557
1558         initcall_debug_report(dev, calltime, cb, error);
1559
1560         return error;
1561 }
1562
1563 static void dpm_clear_superiors_direct_complete(struct device *dev)
1564 {
1565         struct device_link *link;
1566         int idx;
1567
1568         if (dev->parent) {
1569                 spin_lock_irq(&dev->parent->power.lock);
1570                 dev->parent->power.direct_complete = false;
1571                 spin_unlock_irq(&dev->parent->power.lock);
1572         }
1573
1574         idx = device_links_read_lock();
1575
1576         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1577                 spin_lock_irq(&link->supplier->power.lock);
1578                 link->supplier->power.direct_complete = false;
1579                 spin_unlock_irq(&link->supplier->power.lock);
1580         }
1581
1582         device_links_read_unlock(idx);
1583 }
1584
1585 /**
1586  * __device_suspend - Execute "suspend" callbacks for given device.
1587  * @dev: Device to handle.
1588  * @state: PM transition of the system being carried out.
1589  * @async: If true, the device is being suspended asynchronously.
1590  */
1591 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1592 {
1593         pm_callback_t callback = NULL;
1594         const char *info = NULL;
1595         int error = 0;
1596         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1597
1598         TRACE_DEVICE(dev);
1599         TRACE_SUSPEND(0);
1600
1601         dpm_wait_for_subordinate(dev, async);
1602
1603         if (async_error) {
1604                 dev->power.direct_complete = false;
1605                 goto Complete;
1606         }
1607
1608         /*
1609          * Wait for possible runtime PM transitions of the device in progress
1610          * to complete and if there's a runtime resume request pending for it,
1611          * resume it before proceeding with invoking the system-wide suspend
1612          * callbacks for it.
1613          *
1614          * If the system-wide suspend callbacks below change the configuration
1615          * of the device, they must disable runtime PM for it or otherwise
1616          * ensure that its runtime-resume callbacks will not be confused by that
1617          * change in case they are invoked going forward.
1618          */
1619         pm_runtime_barrier(dev);
1620
1621         if (pm_wakeup_pending()) {
1622                 dev->power.direct_complete = false;
1623                 async_error = -EBUSY;
1624                 goto Complete;
1625         }
1626
1627         if (dev->power.syscore)
1628                 goto Complete;
1629
1630         /* Avoid direct_complete to let wakeup_path propagate. */
1631         if (device_may_wakeup(dev) || dev->power.wakeup_path)
1632                 dev->power.direct_complete = false;
1633
1634         if (dev->power.direct_complete) {
1635                 if (pm_runtime_status_suspended(dev)) {
1636                         pm_runtime_disable(dev);
1637                         if (pm_runtime_status_suspended(dev)) {
1638                                 pm_dev_dbg(dev, state, "direct-complete ");
1639                                 goto Complete;
1640                         }
1641
1642                         pm_runtime_enable(dev);
1643                 }
1644                 dev->power.direct_complete = false;
1645         }
1646
1647         dev->power.may_skip_resume = true;
1648         dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1649
1650         dpm_watchdog_set(&wd, dev);
1651         device_lock(dev);
1652
1653         if (dev->pm_domain) {
1654                 info = "power domain ";
1655                 callback = pm_op(&dev->pm_domain->ops, state);
1656                 goto Run;
1657         }
1658
1659         if (dev->type && dev->type->pm) {
1660                 info = "type ";
1661                 callback = pm_op(dev->type->pm, state);
1662                 goto Run;
1663         }
1664
1665         if (dev->class && dev->class->pm) {
1666                 info = "class ";
1667                 callback = pm_op(dev->class->pm, state);
1668                 goto Run;
1669         }
1670
1671         if (dev->bus) {
1672                 if (dev->bus->pm) {
1673                         info = "bus ";
1674                         callback = pm_op(dev->bus->pm, state);
1675                 } else if (dev->bus->suspend) {
1676                         pm_dev_dbg(dev, state, "legacy bus ");
1677                         error = legacy_suspend(dev, state, dev->bus->suspend,
1678                                                 "legacy bus ");
1679                         goto End;
1680                 }
1681         }
1682
1683  Run:
1684         if (!callback && dev->driver && dev->driver->pm) {
1685                 info = "driver ";
1686                 callback = pm_op(dev->driver->pm, state);
1687         }
1688
1689         error = dpm_run_callback(callback, dev, state, info);
1690
1691  End:
1692         if (!error) {
1693                 dev->power.is_suspended = true;
1694                 if (device_may_wakeup(dev))
1695                         dev->power.wakeup_path = true;
1696
1697                 dpm_propagate_wakeup_to_parent(dev);
1698                 dpm_clear_superiors_direct_complete(dev);
1699         }
1700
1701         device_unlock(dev);
1702         dpm_watchdog_clear(&wd);
1703
1704  Complete:
1705         if (error)
1706                 async_error = error;
1707
1708         complete_all(&dev->power.completion);
1709         TRACE_SUSPEND(error);
1710         return error;
1711 }
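
/*
 * Illustrative sketch, not part of the upstream file (foo_probe is a
 * hypothetical driver callback): a driver whose device retains its state
 * across system suspend can ask the PM core to consider skipping its resume
 * callbacks (see the may_skip_resume/must_resume handling above) by setting
 * DPM_FLAG_MAY_SKIP_RESUME, typically at probe time.
 */
static int foo_probe(struct device *dev)
{
        dev_pm_set_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
        return 0;
}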
1712
1713 static void async_suspend(void *data, async_cookie_t cookie)
1714 {
1715         struct device *dev = (struct device *)data;
1716         int error;
1717
1718         error = __device_suspend(dev, pm_transition, true);
1719         if (error) {
1720                 dpm_save_failed_dev(dev_name(dev));
1721                 pm_dev_err(dev, pm_transition, " async", error);
1722         }
1723
1724         put_device(dev);
1725 }
1726
1727 static int device_suspend(struct device *dev)
1728 {
1729         if (dpm_async_fn(dev, async_suspend))
1730                 return 0;
1731
1732         return __device_suspend(dev, pm_transition, false);
1733 }
1734
1735 /**
1736  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1737  * @state: PM transition of the system being carried out.
1738  */
1739 int dpm_suspend(pm_message_t state)
1740 {
1741         ktime_t starttime = ktime_get();
1742         int error = 0;
1743
1744         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1745         might_sleep();
1746
1747         devfreq_suspend();
1748         cpufreq_suspend();
1749
1750         mutex_lock(&dpm_list_mtx);
1751         pm_transition = state;
1752         async_error = 0;
1753         while (!list_empty(&dpm_prepared_list)) {
1754                 struct device *dev = to_device(dpm_prepared_list.prev);
1755
1756                 get_device(dev);
1757                 mutex_unlock(&dpm_list_mtx);
1758
1759                 error = device_suspend(dev);
1760
1761                 mutex_lock(&dpm_list_mtx);
1762                 if (error) {
1763                         pm_dev_err(dev, state, "", error);
1764                         dpm_save_failed_dev(dev_name(dev));
1765                         put_device(dev);
1766                         break;
1767                 }
1768                 if (!list_empty(&dev->power.entry))
1769                         list_move(&dev->power.entry, &dpm_suspended_list);
1770                 put_device(dev);
1771                 if (async_error)
1772                         break;
1773         }
1774         mutex_unlock(&dpm_list_mtx);
1775         async_synchronize_full();
1776         if (!error)
1777                 error = async_error;
1778         if (error) {
1779                 suspend_stats.failed_suspend++;
1780                 dpm_save_failed_step(SUSPEND_SUSPEND);
1781         }
1782         dpm_show_time(starttime, state, error, NULL);
1783         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1784         return error;
1785 }
1786
1787 /**
1788  * device_prepare - Prepare a device for system power transition.
1789  * @dev: Device to handle.
1790  * @state: PM transition of the system being carried out.
1791  *
1792  * Execute the ->prepare() callback(s) for given device.  No new children of the
1793  * device may be registered after this function has returned.
1794  */
1795 static int device_prepare(struct device *dev, pm_message_t state)
1796 {
1797         int (*callback)(struct device *) = NULL;
1798         int ret = 0;
1799
1800         /*
1801          * If a device's parent goes into runtime suspend at the wrong time,
1802          * it won't be possible to resume the device.  To prevent this we
1803          * block runtime suspend here, during the prepare phase, and allow
1804          * it again during the complete phase.
1805          */
1806         pm_runtime_get_noresume(dev);
1807
1808         if (dev->power.syscore)
1809                 return 0;
1810
1811         device_lock(dev);
1812
1813         dev->power.wakeup_path = false;
1814
1815         if (dev->power.no_pm_callbacks)
1816                 goto unlock;
1817
1818         if (dev->pm_domain)
1819                 callback = dev->pm_domain->ops.prepare;
1820         else if (dev->type && dev->type->pm)
1821                 callback = dev->type->pm->prepare;
1822         else if (dev->class && dev->class->pm)
1823                 callback = dev->class->pm->prepare;
1824         else if (dev->bus && dev->bus->pm)
1825                 callback = dev->bus->pm->prepare;
1826
1827         if (!callback && dev->driver && dev->driver->pm)
1828                 callback = dev->driver->pm->prepare;
1829
1830         if (callback)
1831                 ret = callback(dev);
1832
1833 unlock:
1834         device_unlock(dev);
1835
1836         if (ret < 0) {
1837                 suspend_report_result(callback, ret);
1838                 pm_runtime_put(dev);
1839                 return ret;
1840         }
1841         /*
1842          * A positive return value from ->prepare() means "this device appears
1843          * to be runtime-suspended and its state is fine, so if it really is
1844          * runtime-suspended, you can leave it in that state provided that you
1845          * will do the same thing with all of its descendants".  This only
1846          * applies to suspend transitions, however.
1847          */
1848         spin_lock_irq(&dev->power.lock);
1849         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1850                 (ret > 0 || dev->power.no_pm_callbacks) &&
1851                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1852         spin_unlock_irq(&dev->power.lock);
1853         return 0;
1854 }
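
/*
 * Illustrative sketch, not part of the upstream file (foo_prepare is a
 * hypothetical callback): a ->prepare() implementation opting into the
 * direct-complete path set up above by returning a positive value whenever
 * the device is already runtime-suspended and that state is adequate for
 * the system transition.
 */
static int foo_prepare(struct device *dev)
{
        /* Positive return: the core may leave the device suspended. */
        return pm_runtime_suspended(dev) ? 1 : 0;
}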
1855
1856 /**
1857  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1858  * @state: PM transition of the system being carried out.
1859  *
1860  * Execute the ->prepare() callback(s) for all devices.
1861  */
1862 int dpm_prepare(pm_message_t state)
1863 {
1864         int error = 0;
1865
1866         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1867         might_sleep();
1868
1869         /*
1870          * Give the known devices a chance to complete their probes before
1871          * probing is disabled.  This sync point is important at least at
1872          * boot time and during hibernation restore.
1873          */
1874         wait_for_device_probe();
1875         /*
1876          * Probing devices during suspend or hibernation is unsafe, and the
1877          * system's behavior would be unpredictable if it happened.  So,
1878          * prohibit device probing here and defer any probes instead.  The
1879          * normal behavior will be restored in dpm_complete().
1880          */
1881         device_block_probing();
1882
1883         mutex_lock(&dpm_list_mtx);
1884         while (!list_empty(&dpm_list)) {
1885                 struct device *dev = to_device(dpm_list.next);
1886
1887                 get_device(dev);
1888                 mutex_unlock(&dpm_list_mtx);
1889
1890                 trace_device_pm_callback_start(dev, "", state.event);
1891                 error = device_prepare(dev, state);
1892                 trace_device_pm_callback_end(dev, error);
1893
1894                 mutex_lock(&dpm_list_mtx);
1895                 if (error) {
1896                         if (error == -EAGAIN) {
1897                                 put_device(dev);
1898                                 error = 0;
1899                                 continue;
1900                         }
1901                         pr_info("Device %s not prepared for power transition: code %d\n",
1902                                 dev_name(dev), error);
1903                         put_device(dev);
1904                         break;
1905                 }
1906                 dev->power.is_prepared = true;
1907                 if (!list_empty(&dev->power.entry))
1908                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1909                 put_device(dev);
1910         }
1911         mutex_unlock(&dpm_list_mtx);
1912         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1913         return error;
1914 }
1915
1916 /**
1917  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1918  * @state: PM transition of the system being carried out.
1919  *
1920  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1921  * callbacks for them.
1922  */
1923 int dpm_suspend_start(pm_message_t state)
1924 {
1925         ktime_t starttime = ktime_get();
1926         int error;
1927
1928         error = dpm_prepare(state);
1929         if (error) {
1930                 suspend_stats.failed_prepare++;
1931                 dpm_save_failed_step(SUSPEND_PREPARE);
1932         } else
1933                 error = dpm_suspend(state);
1934         dpm_show_time(starttime, state, error, "start");
1935         return error;
1936 }
1937 EXPORT_SYMBOL_GPL(dpm_suspend_start);
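
/*
 * Rough usage sketch (error handling elided): dpm_suspend_start() is paired
 * with dpm_resume_end(), which runs the "resume" and "complete" callbacks,
 * e.g.:
 *
 *      error = dpm_suspend_start(PMSG_SUSPEND);
 *      ...
 *      dpm_resume_end(PMSG_RESUME);
 */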
1938
1939 void __suspend_report_result(const char *function, void *fn, int ret)
1940 {
1941         if (ret)
1942                 pr_err("%s(): %pS returns %d\n", function, fn, ret);
1943 }
1944 EXPORT_SYMBOL_GPL(__suspend_report_result);
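
/*
 * Callers normally invoke this through the suspend_report_result() macro
 * from <linux/pm.h>, which supplies __func__ as @function, as in
 * legacy_suspend() above:
 *
 *      error = cb(dev, state);
 *      suspend_report_result(cb, error);
 */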
1945
1946 /**
1947  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1948  * @subordinate: Device that needs to wait for @dev.
1949  * @dev: Device to wait for.
1950  */
1951 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1952 {
1953         dpm_wait(dev, subordinate->power.async_suspend);
1954         return async_error;
1955 }
1956 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
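
/*
 * Illustrative sketch, not part of the upstream file (the foo_* names are
 * hypothetical): a driver can use device_pm_wait_for_dev() to order its PM
 * work after that of another, non-ancestor device it depends on.
 */
struct foo_data {
        struct device *partner;         /* device that must go first */
};

static int foo_suspend(struct device *dev)
{
        struct foo_data *foo = dev_get_drvdata(dev);

        /* Wait until the partner device's transition has completed. */
        return device_pm_wait_for_dev(dev, foo->partner);
}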
1957
1958 /**
1959  * dpm_for_each_dev - device iterator.
1960  * @data: data for the callback.
1961  * @fn: function to be called for each device.
1962  *
1963  * Iterate over devices in dpm_list, and call @fn for each device,
1964  * passing it @data.
1965  */
1966 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1967 {
1968         struct device *dev;
1969
1970         if (!fn)
1971                 return;
1972
1973         device_pm_lock();
1974         list_for_each_entry(dev, &dpm_list, power.entry)
1975                 fn(dev, data);
1976         device_pm_unlock();
1977 }
1978 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
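
/*
 * Usage sketch (foo_count_one is a hypothetical callback): count the
 * devices currently on dpm_list via the iterator above.
 */
static void foo_count_one(struct device *dev, void *data)
{
        (*(unsigned int *)data)++;      /* one more device on dpm_list */
}

/*
 * Then, from process context:
 *
 *      unsigned int count = 0;
 *
 *      dpm_for_each_dev(&count, foo_count_one);
 */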
1979
1980 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1981 {
1982         if (!ops)
1983                 return true;
1984
1985         return !ops->prepare &&
1986                !ops->suspend &&
1987                !ops->suspend_late &&
1988                !ops->suspend_noirq &&
1989                !ops->resume_noirq &&
1990                !ops->resume_early &&
1991                !ops->resume &&
1992                !ops->complete;
1993 }
1994
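/*
 * Record in dev->power.no_pm_callbacks whether @dev has no PM callbacks at
 * any level (bus, class, type, PM domain or driver), so that system-wide PM
 * code can skip the device cheaply (see device_prepare() above).
 */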
1995 void device_pm_check_callbacks(struct device *dev)
1996 {
1997         unsigned long flags;
1998
1999         spin_lock_irqsave(&dev->power.lock, flags);
2000         dev->power.no_pm_callbacks =
2001                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2002                  !dev->bus->suspend && !dev->bus->resume)) &&
2003                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2004                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2005                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2006                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2007                  !dev->driver->suspend && !dev->driver->resume));
2008         spin_unlock_irqrestore(&dev->power.lock, flags);
2009 }
2010
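/*
 * Report whether system-wide suspend callbacks may be skipped for @dev:
 * true when its driver has set DPM_FLAG_SMART_SUSPEND and the device is
 * already runtime-suspended.
 */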
2011 bool dev_pm_skip_suspend(struct device *dev)
2012 {
2013         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2014                 pm_runtime_status_suspended(dev);
2015 }
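
/*
 * Illustrative sketch: a driver opts into the optimization above by setting
 * the flag at probe time, e.g.:
 *
 *      dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
 *
 * after which middle-layer PM code may skip its suspend callbacks whenever
 * the device is already runtime-suspended.
 */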