GNU Linux-libre 6.8.9-gnu
releases.git: drivers/base/power/main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44         list_for_each_entry_rcu(pos, head, member, \
45                         device_links_read_lock_held())
46
47 /*
48  * The entries in dpm_list are in depth-first order, simply because
49  * children are guaranteed to be discovered after their parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mutex.
55  */
56
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62
63 struct suspend_stats suspend_stats;
64 static DEFINE_MUTEX(dpm_list_mtx);
65 static pm_message_t pm_transition;
66
67 static int async_error;
68
69 static const char *pm_verb(int event)
70 {
71         switch (event) {
72         case PM_EVENT_SUSPEND:
73                 return "suspend";
74         case PM_EVENT_RESUME:
75                 return "resume";
76         case PM_EVENT_FREEZE:
77                 return "freeze";
78         case PM_EVENT_QUIESCE:
79                 return "quiesce";
80         case PM_EVENT_HIBERNATE:
81                 return "hibernate";
82         case PM_EVENT_THAW:
83                 return "thaw";
84         case PM_EVENT_RESTORE:
85                 return "restore";
86         case PM_EVENT_RECOVER:
87                 return "recover";
88         default:
89                 return "(unknown PM event)";
90         }
91 }
92
93 /**
94  * device_pm_sleep_init - Initialize system suspend-related device fields.
95  * @dev: Device object being initialized.
96  */
97 void device_pm_sleep_init(struct device *dev)
98 {
99         dev->power.is_prepared = false;
100         dev->power.is_suspended = false;
101         dev->power.is_noirq_suspended = false;
102         dev->power.is_late_suspended = false;
103         init_completion(&dev->power.completion);
104         complete_all(&dev->power.completion);
105         dev->power.wakeup = NULL;
106         INIT_LIST_HEAD(&dev->power.entry);
107 }
108
109 /**
110  * device_pm_lock - Lock the list of active devices used by the PM core.
111  */
112 void device_pm_lock(void)
113 {
114         mutex_lock(&dpm_list_mtx);
115 }
116
117 /**
118  * device_pm_unlock - Unlock the list of active devices used by the PM core.
119  */
120 void device_pm_unlock(void)
121 {
122         mutex_unlock(&dpm_list_mtx);
123 }
124
125 /**
126  * device_pm_add - Add a device to the PM core's list of active devices.
127  * @dev: Device to add to the list.
128  */
129 void device_pm_add(struct device *dev)
130 {
131         /* Skip PM setup/initialization. */
132         if (device_pm_not_required(dev))
133                 return;
134
135         pr_debug("Adding info for %s:%s\n",
136                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137         device_pm_check_callbacks(dev);
138         mutex_lock(&dpm_list_mtx);
139         if (dev->parent && dev->parent->power.is_prepared)
140                 dev_warn(dev, "parent %s should not be sleeping\n",
141                         dev_name(dev->parent));
142         list_add_tail(&dev->power.entry, &dpm_list);
143         dev->power.in_dpm_list = true;
144         mutex_unlock(&dpm_list_mtx);
145 }
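
device_pm_add() returns early for devices flagged as not requiring PM handling. As a driver-side illustration only (the foo_* name is hypothetical), a purely virtual device can be flagged with device_set_pm_not_required() before registration so the PM core skips it:

#include <linux/device.h>

/*
 * Illustrative sketch only (foo_* is hypothetical): flag a purely virtual
 * device as needing no PM handling before it is registered, so that
 * device_pm_add() above returns early for it.
 */
static int foo_register_virtual_dev(struct device *dev)
{
	device_set_pm_not_required(dev);	/* must precede device_add() */
	return device_add(dev);
}
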
146
147 /**
148  * device_pm_remove - Remove a device from the PM core's list of active devices.
149  * @dev: Device to be removed from the list.
150  */
151 void device_pm_remove(struct device *dev)
152 {
153         if (device_pm_not_required(dev))
154                 return;
155
156         pr_debug("Removing info for %s:%s\n",
157                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158         complete_all(&dev->power.completion);
159         mutex_lock(&dpm_list_mtx);
160         list_del_init(&dev->power.entry);
161         dev->power.in_dpm_list = false;
162         mutex_unlock(&dpm_list_mtx);
163         device_wakeup_disable(dev);
164         pm_runtime_remove(dev);
165         device_pm_check_callbacks(dev);
166 }
167
168 /**
169  * device_pm_move_before - Move device in the PM core's list of active devices.
170  * @deva: Device to move in dpm_list.
171  * @devb: Device @deva should come before.
172  */
173 void device_pm_move_before(struct device *deva, struct device *devb)
174 {
175         pr_debug("Moving %s:%s before %s:%s\n",
176                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
177                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
178         /* Delete deva from dpm_list and reinsert before devb. */
179         list_move_tail(&deva->power.entry, &devb->power.entry);
180 }
181
182 /**
183  * device_pm_move_after - Move device in the PM core's list of active devices.
184  * @deva: Device to move in dpm_list.
185  * @devb: Device @deva should come after.
186  */
187 void device_pm_move_after(struct device *deva, struct device *devb)
188 {
189         pr_debug("Moving %s:%s after %s:%s\n",
190                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192         /* Delete deva from dpm_list and reinsert after devb. */
193         list_move(&deva->power.entry, &devb->power.entry);
194 }
195
196 /**
197  * device_pm_move_last - Move device to end of the PM core's list of devices.
198  * @dev: Device to move in dpm_list.
199  */
200 void device_pm_move_last(struct device *dev)
201 {
202         pr_debug("Moving %s:%s to end of list\n",
203                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204         list_move_tail(&dev->power.entry, &dpm_list);
205 }
206
207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
208 {
209         if (!pm_print_times_enabled)
210                 return 0;
211
212         dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
213                  task_pid_nr(current),
214                  dev->parent ? dev_name(dev->parent) : "none");
215         return ktime_get();
216 }
217
218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
219                                   void *cb, int error)
220 {
221         ktime_t rettime;
222
223         if (!pm_print_times_enabled)
224                 return;
225
226         rettime = ktime_get();
227         dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
228                  (unsigned long long)ktime_us_delta(rettime, calltime));
229 }
230
231 /**
232  * dpm_wait - Wait for a PM operation to complete.
233  * @dev: Device to wait for.
234  * @async: If unset, wait only if the device's power.async_suspend flag is set.
235  */
236 static void dpm_wait(struct device *dev, bool async)
237 {
238         if (!dev)
239                 return;
240
241         if (async || (pm_async_enabled && dev->power.async_suspend))
242                 wait_for_completion(&dev->power.completion);
243 }
244
245 static int dpm_wait_fn(struct device *dev, void *async_ptr)
246 {
247         dpm_wait(dev, *((bool *)async_ptr));
248         return 0;
249 }
250
251 static void dpm_wait_for_children(struct device *dev, bool async)
252 {
253         device_for_each_child(dev, &async, dpm_wait_fn);
254 }
255
256 static void dpm_wait_for_suppliers(struct device *dev, bool async)
257 {
258         struct device_link *link;
259         int idx;
260
261         idx = device_links_read_lock();
262
263         /*
264          * If the supplier goes away right after we've checked the link to it,
265          * we'll wait for its completion to change the state, but that's fine,
266          * because the only things that will block as a result are the SRCU
267          * callbacks freeing the link objects for the links in the list we're
268          * walking.
269          */
270         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
271                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
272                         dpm_wait(link->supplier, async);
273
274         device_links_read_unlock(idx);
275 }
276
277 static bool dpm_wait_for_superior(struct device *dev, bool async)
278 {
279         struct device *parent;
280
281         /*
282          * If the device is resumed asynchronously and the parent's callback
283          * deletes both the device and the parent itself, the parent object may
284          * be freed while this function is running, so avoid that by reference
285          * counting the parent once more unless the device has been deleted
286          * already (in which case return right away).
287          */
288         mutex_lock(&dpm_list_mtx);
289
290         if (!device_pm_initialized(dev)) {
291                 mutex_unlock(&dpm_list_mtx);
292                 return false;
293         }
294
295         parent = get_device(dev->parent);
296
297         mutex_unlock(&dpm_list_mtx);
298
299         dpm_wait(parent, async);
300         put_device(parent);
301
302         dpm_wait_for_suppliers(dev, async);
303
304         /*
305          * If the parent's callback has deleted the device, attempting to resume
306          * it would be invalid, so avoid doing that then.
307          */
308         return device_pm_initialized(dev);
309 }
310
311 static void dpm_wait_for_consumers(struct device *dev, bool async)
312 {
313         struct device_link *link;
314         int idx;
315
316         idx = device_links_read_lock();
317
318         /*
319          * The status of a device link can only be changed from "dormant" by a
320          * probe, but that cannot happen during system suspend/resume.  In
321          * theory it can change to "dormant" at that time, but then it is
322          * reasonable to wait for the target device anyway (e.g. if it goes
323          * away, it's better to wait for it to go away completely and then
324          * continue instead of trying to continue in parallel with its
325          * unregistration).
326          */
327         list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
328                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
329                         dpm_wait(link->consumer, async);
330
331         device_links_read_unlock(idx);
332 }
333
334 static void dpm_wait_for_subordinate(struct device *dev, bool async)
335 {
336         dpm_wait_for_children(dev, async);
337         dpm_wait_for_consumers(dev, async);
338 }
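
The supplier and consumer waits above are driven by device links. A minimal sketch, assuming a consumer driver that has already located its supplier device (foo_* names and the minimal error handling are illustrative only), of creating such a link so the PM core orders the two devices during suspend and resume:

#include <linux/device.h>
#include <linux/errno.h>

/*
 * Illustrative sketch only: a consumer device links itself to its supplier so
 * that dpm_wait_for_suppliers() and dpm_wait_for_consumers() above order the
 * two correctly.  The supplier lookup is assumed to happen elsewhere.
 */
static int foo_consumer_probe(struct device *dev, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(dev, supplier, DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;

	return 0;
}
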
339
340 /**
341  * pm_op - Return the PM operation appropriate for given PM event.
342  * @ops: PM operations to choose from.
343  * @state: PM transition of the system being carried out.
344  */
345 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
346 {
347         switch (state.event) {
348 #ifdef CONFIG_SUSPEND
349         case PM_EVENT_SUSPEND:
350                 return ops->suspend;
351         case PM_EVENT_RESUME:
352                 return ops->resume;
353 #endif /* CONFIG_SUSPEND */
354 #ifdef CONFIG_HIBERNATE_CALLBACKS
355         case PM_EVENT_FREEZE:
356         case PM_EVENT_QUIESCE:
357                 return ops->freeze;
358         case PM_EVENT_HIBERNATE:
359                 return ops->poweroff;
360         case PM_EVENT_THAW:
361         case PM_EVENT_RECOVER:
362                 return ops->thaw;
363         case PM_EVENT_RESTORE:
364                 return ops->restore;
365 #endif /* CONFIG_HIBERNATE_CALLBACKS */
366         }
367
368         return NULL;
369 }
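
pm_op(), together with pm_late_early_op() and pm_noirq_op() below, merely selects one member of a dev_pm_ops structure for the current event. A hedged driver-side sketch (foo_* callbacks are hypothetical) of the structure those selections operate on:

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical callbacks, for illustration only. */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

/*
 * The members pm_op() picks for PM_EVENT_SUSPEND/RESUME, and those the
 * hibernation events map to, live in one dev_pm_ops instance supplied by the
 * driver (or by its PM domain, type, class or bus).
 */
static const struct dev_pm_ops foo_pm_ops = {
	.suspend  = foo_suspend,	/* PM_EVENT_SUSPEND */
	.resume   = foo_resume,		/* PM_EVENT_RESUME */
	.freeze   = foo_suspend,	/* PM_EVENT_FREEZE / PM_EVENT_QUIESCE */
	.thaw     = foo_resume,		/* PM_EVENT_THAW / PM_EVENT_RECOVER */
	.poweroff = foo_suspend,	/* PM_EVENT_HIBERNATE */
	.restore  = foo_resume,		/* PM_EVENT_RESTORE */
};
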
370
371 /**
372  * pm_late_early_op - Return the PM operation appropriate for given PM event.
373  * @ops: PM operations to choose from.
374  * @state: PM transition of the system being carried out.
375  *
376  * Runtime PM is disabled for the device while the returned callback runs.
377  */
378 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
379                                       pm_message_t state)
380 {
381         switch (state.event) {
382 #ifdef CONFIG_SUSPEND
383         case PM_EVENT_SUSPEND:
384                 return ops->suspend_late;
385         case PM_EVENT_RESUME:
386                 return ops->resume_early;
387 #endif /* CONFIG_SUSPEND */
388 #ifdef CONFIG_HIBERNATE_CALLBACKS
389         case PM_EVENT_FREEZE:
390         case PM_EVENT_QUIESCE:
391                 return ops->freeze_late;
392         case PM_EVENT_HIBERNATE:
393                 return ops->poweroff_late;
394         case PM_EVENT_THAW:
395         case PM_EVENT_RECOVER:
396                 return ops->thaw_early;
397         case PM_EVENT_RESTORE:
398                 return ops->restore_early;
399 #endif /* CONFIG_HIBERNATE_CALLBACKS */
400         }
401
402         return NULL;
403 }
404
405 /**
406  * pm_noirq_op - Return the PM operation appropriate for given PM event.
407  * @ops: PM operations to choose from.
408  * @state: PM transition of the system being carried out.
409  *
410  * The driver of the device will not receive interrupts while the returned
411  * callback is being executed.
412  */
413 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
414 {
415         switch (state.event) {
416 #ifdef CONFIG_SUSPEND
417         case PM_EVENT_SUSPEND:
418                 return ops->suspend_noirq;
419         case PM_EVENT_RESUME:
420                 return ops->resume_noirq;
421 #endif /* CONFIG_SUSPEND */
422 #ifdef CONFIG_HIBERNATE_CALLBACKS
423         case PM_EVENT_FREEZE:
424         case PM_EVENT_QUIESCE:
425                 return ops->freeze_noirq;
426         case PM_EVENT_HIBERNATE:
427                 return ops->poweroff_noirq;
428         case PM_EVENT_THAW:
429         case PM_EVENT_RECOVER:
430                 return ops->thaw_noirq;
431         case PM_EVENT_RESTORE:
432                 return ops->restore_noirq;
433 #endif /* CONFIG_HIBERNATE_CALLBACKS */
434         }
435
436         return NULL;
437 }
438
439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
440 {
441         dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
442                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
443                 ", may wakeup" : "", dev->power.driver_flags);
444 }
445
446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
447                         int error)
448 {
449         dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
450                 error);
451 }
452
453 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
454                           const char *info)
455 {
456         ktime_t calltime;
457         u64 usecs64;
458         int usecs;
459
460         calltime = ktime_get();
461         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
462         do_div(usecs64, NSEC_PER_USEC);
463         usecs = usecs64;
464         if (usecs == 0)
465                 usecs = 1;
466
467         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
468                   info ?: "", info ? " " : "", pm_verb(state.event),
469                   error ? "aborted" : "complete",
470                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
471 }
472
473 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
474                             pm_message_t state, const char *info)
475 {
476         ktime_t calltime;
477         int error;
478
479         if (!cb)
480                 return 0;
481
482         calltime = initcall_debug_start(dev, cb);
483
484         pm_dev_dbg(dev, state, info);
485         trace_device_pm_callback_start(dev, info, state.event);
486         error = cb(dev);
487         trace_device_pm_callback_end(dev, error);
488         suspend_report_result(dev, cb, error);
489
490         initcall_debug_report(dev, calltime, cb, error);
491
492         return error;
493 }
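
dpm_run_callback() treats a missing callback as success and feeds any non-zero return through suspend_report_result(). From a driver's point of view the convention is 0 on success and a negative errno to abort the transition; a minimal sketch (bar_* names are hypothetical):

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical per-driver state, purely for illustration. */
static atomic_t bar_busy;

/*
 * Illustrative sketch only: a ->suspend() callback following the return
 * convention dpm_run_callback() relies on: 0 on success, a negative errno
 * (logged via suspend_report_result() and pm_dev_err()) to abort.
 */
static int bar_suspend(struct device *dev)
{
	if (atomic_read(&bar_busy))
		return -EBUSY;		/* refuse to suspend mid-operation */

	return 0;
}
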
494
495 #ifdef CONFIG_DPM_WATCHDOG
496 struct dpm_watchdog {
497         struct device           *dev;
498         struct task_struct      *tsk;
499         struct timer_list       timer;
500 };
501
502 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
503         struct dpm_watchdog wd
504
505 /**
506  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
507  * @t: The timer that PM watchdog depends on.
508  *
509  * Called when a driver has timed out suspending or resuming.
510  * There's not much we can do here to recover so panic() to
511  * capture a crash-dump in pstore.
512  */
513 static void dpm_watchdog_handler(struct timer_list *t)
514 {
515         struct dpm_watchdog *wd = from_timer(wd, t, timer);
516
517         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
518         show_stack(wd->tsk, NULL, KERN_EMERG);
519         panic("%s %s: unrecoverable failure\n",
520                 dev_driver_string(wd->dev), dev_name(wd->dev));
521 }
522
523 /**
524  * dpm_watchdog_set - Enable pm watchdog for given device.
525  * @wd: Watchdog. Must be allocated on the stack.
526  * @dev: Device to handle.
527  */
528 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
529 {
530         struct timer_list *timer = &wd->timer;
531
532         wd->dev = dev;
533         wd->tsk = current;
534
535         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
536         /* use same timeout value for both suspend and resume */
537         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
538         add_timer(timer);
539 }
540
541 /**
542  * dpm_watchdog_clear - Disable suspend/resume watchdog.
543  * @wd: Watchdog to disable.
544  */
545 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
546 {
547         struct timer_list *timer = &wd->timer;
548
549         del_timer_sync(timer);
550         destroy_timer_on_stack(timer);
551 }
552 #else
553 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
554 #define dpm_watchdog_set(x, y)
555 #define dpm_watchdog_clear(x)
556 #endif
557
558 /*------------------------- Resume routines -------------------------*/
559
560 /**
561  * dev_pm_skip_resume - System-wide device resume optimization check.
562  * @dev: Target device.
563  *
564  * Return:
565  * - %false if the transition under way is RESTORE.
566  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
567  * - The logical negation of %power.must_resume otherwise (that is, when the
568  *   transition under way is RESUME).
569  */
570 bool dev_pm_skip_resume(struct device *dev)
571 {
572         if (pm_transition.event == PM_EVENT_RESTORE)
573                 return false;
574
575         if (pm_transition.event == PM_EVENT_THAW)
576                 return dev_pm_skip_suspend(dev);
577
578         return !dev->power.must_resume;
579 }
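
Whether a suspend or resume can be skipped hinges on the driver flags tested by dev_pm_skip_suspend() and on power.must_resume. A hedged sketch of a driver opting in from probe (foo_probe() and the platform bus usage are illustrative assumptions):

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/*
 * Illustrative sketch only: the driver asks the PM core to leave the device
 * runtime-suspended across system transitions.  DPM_FLAG_SMART_SUSPEND feeds
 * dev_pm_skip_suspend(); DPM_FLAG_MAY_SKIP_RESUME allows power.must_resume to
 * stay false, so dev_pm_skip_resume() above can return true.
 */
static int foo_probe(struct platform_device *pdev)
{
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_SUSPEND |
				DPM_FLAG_MAY_SKIP_RESUME);
	pm_runtime_enable(&pdev->dev);
	return 0;
}
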
580
581 /**
582  * device_resume_noirq - Execute a "noirq resume" callback for given device.
583  * @dev: Device to handle.
584  * @state: PM transition of the system being carried out.
585  * @async: If true, the device is being resumed asynchronously.
586  *
587  * The driver of @dev will not receive interrupts while this function is being
588  * executed.
589  */
590 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
591 {
592         pm_callback_t callback = NULL;
593         const char *info = NULL;
594         bool skip_resume;
595         int error = 0;
596
597         TRACE_DEVICE(dev);
598         TRACE_RESUME(0);
599
600         if (dev->power.syscore || dev->power.direct_complete)
601                 goto Out;
602
603         if (!dev->power.is_noirq_suspended)
604                 goto Out;
605
606         if (!dpm_wait_for_superior(dev, async))
607                 goto Out;
608
609         skip_resume = dev_pm_skip_resume(dev);
610         /*
611          * If the driver callback is skipped below or by the middle layer
612          * callback and device_resume_early() also skips the driver callback for
613          * this device later, it needs to appear as "suspended" to PM-runtime,
614          * so change its status accordingly.
615          *
616          * Otherwise, the device is going to be resumed, so set its PM-runtime
617          * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
618          * to avoid confusing drivers that don't use it.
619          */
620         if (skip_resume)
621                 pm_runtime_set_suspended(dev);
622         else if (dev_pm_skip_suspend(dev))
623                 pm_runtime_set_active(dev);
624
625         if (dev->pm_domain) {
626                 info = "noirq power domain ";
627                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
628         } else if (dev->type && dev->type->pm) {
629                 info = "noirq type ";
630                 callback = pm_noirq_op(dev->type->pm, state);
631         } else if (dev->class && dev->class->pm) {
632                 info = "noirq class ";
633                 callback = pm_noirq_op(dev->class->pm, state);
634         } else if (dev->bus && dev->bus->pm) {
635                 info = "noirq bus ";
636                 callback = pm_noirq_op(dev->bus->pm, state);
637         }
638         if (callback)
639                 goto Run;
640
641         if (skip_resume)
642                 goto Skip;
643
644         if (dev->driver && dev->driver->pm) {
645                 info = "noirq driver ";
646                 callback = pm_noirq_op(dev->driver->pm, state);
647         }
648
649 Run:
650         error = dpm_run_callback(callback, dev, state, info);
651
652 Skip:
653         dev->power.is_noirq_suspended = false;
654
655 Out:
656         complete_all(&dev->power.completion);
657         TRACE_RESUME(error);
658
659         if (error) {
660                 suspend_stats.failed_resume_noirq++;
661                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
662                 dpm_save_failed_dev(dev_name(dev));
663                 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
664         }
665 }
666
667 static bool is_async(struct device *dev)
668 {
669         return dev->power.async_suspend && pm_async_enabled
670                 && !pm_trace_is_enabled();
671 }
672
673 static bool dpm_async_fn(struct device *dev, async_func_t func)
674 {
675         reinit_completion(&dev->power.completion);
676
677         if (is_async(dev)) {
678                 dev->power.async_in_progress = true;
679
680                 get_device(dev);
681
682                 if (async_schedule_dev_nocall(func, dev))
683                         return true;
684
685                 put_device(dev);
686         }
687         /*
688          * Because async_schedule_dev_nocall() above has returned false or it
689          * has not been called at all, func() is not running and it is safe to
690          * update the async_in_progress flag without extra synchronization.
691          */
692         dev->power.async_in_progress = false;
693         return false;
694 }
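
is_async() combines the global pm_async switch with the per-device power.async_suspend flag, which is normally set once at probe time. A minimal sketch (foo_* name hypothetical):

#include <linux/device.h>

/*
 * Illustrative sketch only: opt a device into the asynchronous path, so
 * dpm_async_fn() above schedules its callbacks through the async framework
 * (still subject to pm_async_enabled and PM trace being off).
 */
static void foo_setup_async_pm(struct device *dev)
{
	device_enable_async_suspend(dev);
}
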
695
696 static void async_resume_noirq(void *data, async_cookie_t cookie)
697 {
698         struct device *dev = data;
699
700         device_resume_noirq(dev, pm_transition, true);
701         put_device(dev);
702 }
703
704 static void dpm_noirq_resume_devices(pm_message_t state)
705 {
706         struct device *dev;
707         ktime_t starttime = ktime_get();
708
709         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
710         mutex_lock(&dpm_list_mtx);
711         pm_transition = state;
712
713         /*
714          * Trigger the resume of "async" devices upfront so they don't have to
715          * wait for the "non-async" ones they don't depend on.
716          */
717         list_for_each_entry(dev, &dpm_noirq_list, power.entry)
718                 dpm_async_fn(dev, async_resume_noirq);
719
720         while (!list_empty(&dpm_noirq_list)) {
721                 dev = to_device(dpm_noirq_list.next);
722                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
723
724                 if (!dev->power.async_in_progress) {
725                         get_device(dev);
726
727                         mutex_unlock(&dpm_list_mtx);
728
729                         device_resume_noirq(dev, state, false);
730
731                         put_device(dev);
732
733                         mutex_lock(&dpm_list_mtx);
734                 }
735         }
736         mutex_unlock(&dpm_list_mtx);
737         async_synchronize_full();
738         dpm_show_time(starttime, state, 0, "noirq");
739         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
740 }
741
742 /**
743  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
744  * @state: PM transition of the system being carried out.
745  *
746  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
747  * allow device drivers' interrupt handlers to be called.
748  */
749 void dpm_resume_noirq(pm_message_t state)
750 {
751         dpm_noirq_resume_devices(state);
752
753         resume_device_irqs();
754         device_wakeup_disarm_wake_irqs();
755 }
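
device_wakeup_arm_wake_irqs() and device_wakeup_disarm_wake_irqs(), called around the noirq phases here and in dpm_suspend_noirq(), only act on devices that registered a dedicated wake IRQ. A hedged driver-side sketch (foo_* name and the irq parameter are illustrative):

#include <linux/device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

/*
 * Illustrative sketch only: register a dedicated wake IRQ so that the
 * device_wakeup_arm_wake_irqs()/device_wakeup_disarm_wake_irqs() calls made
 * around the "noirq" phases arm and disarm it automatically.
 */
static int foo_init_wakeup(struct device *dev, int irq)
{
	int ret;

	ret = device_init_wakeup(dev, true);
	if (ret)
		return ret;

	return dev_pm_set_wake_irq(dev, irq);
}
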
756
757 /**
758  * device_resume_early - Execute an "early resume" callback for given device.
759  * @dev: Device to handle.
760  * @state: PM transition of the system being carried out.
761  * @async: If true, the device is being resumed asynchronously.
762  *
763  * Runtime PM is disabled for @dev while this function is being executed.
764  */
765 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
766 {
767         pm_callback_t callback = NULL;
768         const char *info = NULL;
769         int error = 0;
770
771         TRACE_DEVICE(dev);
772         TRACE_RESUME(0);
773
774         if (dev->power.syscore || dev->power.direct_complete)
775                 goto Out;
776
777         if (!dev->power.is_late_suspended)
778                 goto Out;
779
780         if (!dpm_wait_for_superior(dev, async))
781                 goto Out;
782
783         if (dev->pm_domain) {
784                 info = "early power domain ";
785                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
786         } else if (dev->type && dev->type->pm) {
787                 info = "early type ";
788                 callback = pm_late_early_op(dev->type->pm, state);
789         } else if (dev->class && dev->class->pm) {
790                 info = "early class ";
791                 callback = pm_late_early_op(dev->class->pm, state);
792         } else if (dev->bus && dev->bus->pm) {
793                 info = "early bus ";
794                 callback = pm_late_early_op(dev->bus->pm, state);
795         }
796         if (callback)
797                 goto Run;
798
799         if (dev_pm_skip_resume(dev))
800                 goto Skip;
801
802         if (dev->driver && dev->driver->pm) {
803                 info = "early driver ";
804                 callback = pm_late_early_op(dev->driver->pm, state);
805         }
806
807 Run:
808         error = dpm_run_callback(callback, dev, state, info);
809
810 Skip:
811         dev->power.is_late_suspended = false;
812
813 Out:
814         TRACE_RESUME(error);
815
816         pm_runtime_enable(dev);
817         complete_all(&dev->power.completion);
818
819         if (error) {
820                 suspend_stats.failed_resume_early++;
821                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
822                 dpm_save_failed_dev(dev_name(dev));
823                 pm_dev_err(dev, state, async ? " async early" : " early", error);
824         }
825 }
826
827 static void async_resume_early(void *data, async_cookie_t cookie)
828 {
829         struct device *dev = data;
830
831         device_resume_early(dev, pm_transition, true);
832         put_device(dev);
833 }
834
835 /**
836  * dpm_resume_early - Execute "early resume" callbacks for all devices.
837  * @state: PM transition of the system being carried out.
838  */
839 void dpm_resume_early(pm_message_t state)
840 {
841         struct device *dev;
842         ktime_t starttime = ktime_get();
843
844         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
845         mutex_lock(&dpm_list_mtx);
846         pm_transition = state;
847
848         /*
849          * Trigger the resume of "async" devices upfront so they don't have to
850          * wait for the "non-async" ones they don't depend on.
851          */
852         list_for_each_entry(dev, &dpm_late_early_list, power.entry)
853                 dpm_async_fn(dev, async_resume_early);
854
855         while (!list_empty(&dpm_late_early_list)) {
856                 dev = to_device(dpm_late_early_list.next);
857                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
858
859                 if (!dev->power.async_in_progress) {
860                         get_device(dev);
861
862                         mutex_unlock(&dpm_list_mtx);
863
864                         device_resume_early(dev, state, false);
865
866                         put_device(dev);
867
868                         mutex_lock(&dpm_list_mtx);
869                 }
870         }
871         mutex_unlock(&dpm_list_mtx);
872         async_synchronize_full();
873         dpm_show_time(starttime, state, 0, "early");
874         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
875 }
876
877 /**
878  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
879  * @state: PM transition of the system being carried out.
880  */
881 void dpm_resume_start(pm_message_t state)
882 {
883         dpm_resume_noirq(state);
884         dpm_resume_early(state);
885 }
886 EXPORT_SYMBOL_GPL(dpm_resume_start);
887
888 /**
889  * device_resume - Execute "resume" callbacks for given device.
890  * @dev: Device to handle.
891  * @state: PM transition of the system being carried out.
892  * @async: If true, the device is being resumed asynchronously.
893  */
894 static void device_resume(struct device *dev, pm_message_t state, bool async)
895 {
896         pm_callback_t callback = NULL;
897         const char *info = NULL;
898         int error = 0;
899         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
900
901         TRACE_DEVICE(dev);
902         TRACE_RESUME(0);
903
904         if (dev->power.syscore)
905                 goto Complete;
906
907         if (dev->power.direct_complete) {
908                 /* Match the pm_runtime_disable() in __device_suspend(). */
909                 pm_runtime_enable(dev);
910                 goto Complete;
911         }
912
913         if (!dpm_wait_for_superior(dev, async))
914                 goto Complete;
915
916         dpm_watchdog_set(&wd, dev);
917         device_lock(dev);
918
919         /*
920          * This is a fib.  But we'll allow new children to be added below
921          * a resumed device, even if the device hasn't been completed yet.
922          */
923         dev->power.is_prepared = false;
924
925         if (!dev->power.is_suspended)
926                 goto Unlock;
927
928         if (dev->pm_domain) {
929                 info = "power domain ";
930                 callback = pm_op(&dev->pm_domain->ops, state);
931                 goto Driver;
932         }
933
934         if (dev->type && dev->type->pm) {
935                 info = "type ";
936                 callback = pm_op(dev->type->pm, state);
937                 goto Driver;
938         }
939
940         if (dev->class && dev->class->pm) {
941                 info = "class ";
942                 callback = pm_op(dev->class->pm, state);
943                 goto Driver;
944         }
945
946         if (dev->bus) {
947                 if (dev->bus->pm) {
948                         info = "bus ";
949                         callback = pm_op(dev->bus->pm, state);
950                 } else if (dev->bus->resume) {
951                         info = "legacy bus ";
952                         callback = dev->bus->resume;
953                         goto End;
954                 }
955         }
956
957  Driver:
958         if (!callback && dev->driver && dev->driver->pm) {
959                 info = "driver ";
960                 callback = pm_op(dev->driver->pm, state);
961         }
962
963  End:
964         error = dpm_run_callback(callback, dev, state, info);
965         dev->power.is_suspended = false;
966
967  Unlock:
968         device_unlock(dev);
969         dpm_watchdog_clear(&wd);
970
971  Complete:
972         complete_all(&dev->power.completion);
973
974         TRACE_RESUME(error);
975
976         if (error) {
977                 suspend_stats.failed_resume++;
978                 dpm_save_failed_step(SUSPEND_RESUME);
979                 dpm_save_failed_dev(dev_name(dev));
980                 pm_dev_err(dev, state, async ? " async" : "", error);
981         }
982 }
983
984 static void async_resume(void *data, async_cookie_t cookie)
985 {
986         struct device *dev = data;
987
988         device_resume(dev, pm_transition, true);
989         put_device(dev);
990 }
991
992 /**
993  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
994  * @state: PM transition of the system being carried out.
995  *
996  * Execute the appropriate "resume" callback for all devices whose status
997  * indicates that they are suspended.
998  */
999 void dpm_resume(pm_message_t state)
1000 {
1001         struct device *dev;
1002         ktime_t starttime = ktime_get();
1003
1004         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1005         might_sleep();
1006
1007         mutex_lock(&dpm_list_mtx);
1008         pm_transition = state;
1009         async_error = 0;
1010
1011         /*
1012          * Trigger the resume of "async" devices upfront so they don't have to
1013          * wait for the "non-async" ones they don't depend on.
1014          */
1015         list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1016                 dpm_async_fn(dev, async_resume);
1017
1018         while (!list_empty(&dpm_suspended_list)) {
1019                 dev = to_device(dpm_suspended_list.next);
1020
1021                 get_device(dev);
1022
1023                 if (!dev->power.async_in_progress) {
1024                         mutex_unlock(&dpm_list_mtx);
1025
1026                         device_resume(dev, state, false);
1027
1028                         mutex_lock(&dpm_list_mtx);
1029                 }
1030
1031                 if (!list_empty(&dev->power.entry))
1032                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1033
1034                 mutex_unlock(&dpm_list_mtx);
1035
1036                 put_device(dev);
1037
1038                 mutex_lock(&dpm_list_mtx);
1039         }
1040         mutex_unlock(&dpm_list_mtx);
1041         async_synchronize_full();
1042         dpm_show_time(starttime, state, 0, NULL);
1043
1044         cpufreq_resume();
1045         devfreq_resume();
1046         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1047 }
1048
1049 /**
1050  * device_complete - Complete a PM transition for given device.
1051  * @dev: Device to handle.
1052  * @state: PM transition of the system being carried out.
1053  */
1054 static void device_complete(struct device *dev, pm_message_t state)
1055 {
1056         void (*callback)(struct device *) = NULL;
1057         const char *info = NULL;
1058
1059         if (dev->power.syscore)
1060                 goto out;
1061
1062         device_lock(dev);
1063
1064         if (dev->pm_domain) {
1065                 info = "completing power domain ";
1066                 callback = dev->pm_domain->ops.complete;
1067         } else if (dev->type && dev->type->pm) {
1068                 info = "completing type ";
1069                 callback = dev->type->pm->complete;
1070         } else if (dev->class && dev->class->pm) {
1071                 info = "completing class ";
1072                 callback = dev->class->pm->complete;
1073         } else if (dev->bus && dev->bus->pm) {
1074                 info = "completing bus ";
1075                 callback = dev->bus->pm->complete;
1076         }
1077
1078         if (!callback && dev->driver && dev->driver->pm) {
1079                 info = "completing driver ";
1080                 callback = dev->driver->pm->complete;
1081         }
1082
1083         if (callback) {
1084                 pm_dev_dbg(dev, state, info);
1085                 callback(dev);
1086         }
1087
1088         device_unlock(dev);
1089
1090 out:
1091         pm_runtime_put(dev);
1092 }
1093
1094 /**
1095  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1096  * @state: PM transition of the system being carried out.
1097  *
1098  * Execute the ->complete() callbacks for all devices whose PM status is not
1099  * DPM_ON (this allows new devices to be registered).
1100  */
1101 void dpm_complete(pm_message_t state)
1102 {
1103         struct list_head list;
1104
1105         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1106         might_sleep();
1107
1108         INIT_LIST_HEAD(&list);
1109         mutex_lock(&dpm_list_mtx);
1110         while (!list_empty(&dpm_prepared_list)) {
1111                 struct device *dev = to_device(dpm_prepared_list.prev);
1112
1113                 get_device(dev);
1114                 dev->power.is_prepared = false;
1115                 list_move(&dev->power.entry, &list);
1116
1117                 mutex_unlock(&dpm_list_mtx);
1118
1119                 trace_device_pm_callback_start(dev, "", state.event);
1120                 device_complete(dev, state);
1121                 trace_device_pm_callback_end(dev, 0);
1122
1123                 put_device(dev);
1124
1125                 mutex_lock(&dpm_list_mtx);
1126         }
1127         list_splice(&list, &dpm_list);
1128         mutex_unlock(&dpm_list_mtx);
1129
1130         /* Allow device probing and trigger re-probing of deferred devices */
1131         device_unblock_probing();
1132         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1133 }
1134
1135 /**
1136  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1137  * @state: PM transition of the system being carried out.
1138  *
1139  * Execute "resume" callbacks for all devices and complete the PM transition of
1140  * the system.
1141  */
1142 void dpm_resume_end(pm_message_t state)
1143 {
1144         dpm_resume(state);
1145         dpm_complete(state);
1146 }
1147 EXPORT_SYMBOL_GPL(dpm_resume_end);
1148
1149
1150 /*------------------------- Suspend routines -------------------------*/
1151
1152 /**
1153  * resume_event - Return a "resume" message for given "suspend" sleep state.
1154  * @sleep_state: PM message representing a sleep state.
1155  *
1156  * Return a PM message representing the resume event corresponding to given
1157  * sleep state.
1158  */
1159 static pm_message_t resume_event(pm_message_t sleep_state)
1160 {
1161         switch (sleep_state.event) {
1162         case PM_EVENT_SUSPEND:
1163                 return PMSG_RESUME;
1164         case PM_EVENT_FREEZE:
1165         case PM_EVENT_QUIESCE:
1166                 return PMSG_RECOVER;
1167         case PM_EVENT_HIBERNATE:
1168                 return PMSG_RESTORE;
1169         }
1170         return PMSG_ON;
1171 }
1172
1173 static void dpm_superior_set_must_resume(struct device *dev)
1174 {
1175         struct device_link *link;
1176         int idx;
1177
1178         if (dev->parent)
1179                 dev->parent->power.must_resume = true;
1180
1181         idx = device_links_read_lock();
1182
1183         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1184                 link->supplier->power.must_resume = true;
1185
1186         device_links_read_unlock(idx);
1187 }
1188
1189 /**
1190  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1191  * @dev: Device to handle.
1192  * @state: PM transition of the system being carried out.
1193  * @async: If true, the device is being suspended asynchronously.
1194  *
1195  * The driver of @dev will not receive interrupts while this function is being
1196  * executed.
1197  */
1198 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1199 {
1200         pm_callback_t callback = NULL;
1201         const char *info = NULL;
1202         int error = 0;
1203
1204         TRACE_DEVICE(dev);
1205         TRACE_SUSPEND(0);
1206
1207         dpm_wait_for_subordinate(dev, async);
1208
1209         if (async_error)
1210                 goto Complete;
1211
1212         if (dev->power.syscore || dev->power.direct_complete)
1213                 goto Complete;
1214
1215         if (dev->pm_domain) {
1216                 info = "noirq power domain ";
1217                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1218         } else if (dev->type && dev->type->pm) {
1219                 info = "noirq type ";
1220                 callback = pm_noirq_op(dev->type->pm, state);
1221         } else if (dev->class && dev->class->pm) {
1222                 info = "noirq class ";
1223                 callback = pm_noirq_op(dev->class->pm, state);
1224         } else if (dev->bus && dev->bus->pm) {
1225                 info = "noirq bus ";
1226                 callback = pm_noirq_op(dev->bus->pm, state);
1227         }
1228         if (callback)
1229                 goto Run;
1230
1231         if (dev_pm_skip_suspend(dev))
1232                 goto Skip;
1233
1234         if (dev->driver && dev->driver->pm) {
1235                 info = "noirq driver ";
1236                 callback = pm_noirq_op(dev->driver->pm, state);
1237         }
1238
1239 Run:
1240         error = dpm_run_callback(callback, dev, state, info);
1241         if (error) {
1242                 async_error = error;
1243                 goto Complete;
1244         }
1245
1246 Skip:
1247         dev->power.is_noirq_suspended = true;
1248
1249         /*
1250          * Skipping the resume of devices that were in use right before the
1251          * system suspend (as indicated by their PM-runtime usage counters)
1252          * would be suboptimal.  Also resume them if their resume is not
1253          * allowed to be skipped.
1254          */
1255         if (atomic_read(&dev->power.usage_count) > 1 ||
1256             !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1257               dev->power.may_skip_resume))
1258                 dev->power.must_resume = true;
1259
1260         if (dev->power.must_resume)
1261                 dpm_superior_set_must_resume(dev);
1262
1263 Complete:
1264         complete_all(&dev->power.completion);
1265         TRACE_SUSPEND(error);
1266         return error;
1267 }
1268
1269 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1270 {
1271         struct device *dev = data;
1272         int error;
1273
1274         error = __device_suspend_noirq(dev, pm_transition, true);
1275         if (error) {
1276                 dpm_save_failed_dev(dev_name(dev));
1277                 pm_dev_err(dev, pm_transition, " async", error);
1278         }
1279
1280         put_device(dev);
1281 }
1282
1283 static int device_suspend_noirq(struct device *dev)
1284 {
1285         if (dpm_async_fn(dev, async_suspend_noirq))
1286                 return 0;
1287
1288         return __device_suspend_noirq(dev, pm_transition, false);
1289 }
1290
1291 static int dpm_noirq_suspend_devices(pm_message_t state)
1292 {
1293         ktime_t starttime = ktime_get();
1294         int error = 0;
1295
1296         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1297         mutex_lock(&dpm_list_mtx);
1298         pm_transition = state;
1299         async_error = 0;
1300
1301         while (!list_empty(&dpm_late_early_list)) {
1302                 struct device *dev = to_device(dpm_late_early_list.prev);
1303
1304                 get_device(dev);
1305                 mutex_unlock(&dpm_list_mtx);
1306
1307                 error = device_suspend_noirq(dev);
1308
1309                 mutex_lock(&dpm_list_mtx);
1310
1311                 if (error) {
1312                         pm_dev_err(dev, state, " noirq", error);
1313                         dpm_save_failed_dev(dev_name(dev));
1314                 } else if (!list_empty(&dev->power.entry)) {
1315                         list_move(&dev->power.entry, &dpm_noirq_list);
1316                 }
1317
1318                 mutex_unlock(&dpm_list_mtx);
1319
1320                 put_device(dev);
1321
1322                 mutex_lock(&dpm_list_mtx);
1323
1324                 if (error || async_error)
1325                         break;
1326         }
1327         mutex_unlock(&dpm_list_mtx);
1328         async_synchronize_full();
1329         if (!error)
1330                 error = async_error;
1331
1332         if (error) {
1333                 suspend_stats.failed_suspend_noirq++;
1334                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1335         }
1336         dpm_show_time(starttime, state, error, "noirq");
1337         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1338         return error;
1339 }
1340
1341 /**
1342  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1343  * @state: PM transition of the system being carried out.
1344  *
1345  * Prevent device drivers' interrupt handlers from being called and invoke
1346  * "noirq" suspend callbacks for all non-sysdev devices.
1347  */
1348 int dpm_suspend_noirq(pm_message_t state)
1349 {
1350         int ret;
1351
1352         device_wakeup_arm_wake_irqs();
1353         suspend_device_irqs();
1354
1355         ret = dpm_noirq_suspend_devices(state);
1356         if (ret)
1357                 dpm_resume_noirq(resume_event(state));
1358
1359         return ret;
1360 }
1361
1362 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1363 {
1364         struct device *parent = dev->parent;
1365
1366         if (!parent)
1367                 return;
1368
1369         spin_lock_irq(&parent->power.lock);
1370
1371         if (device_wakeup_path(dev) && !parent->power.ignore_children)
1372                 parent->power.wakeup_path = true;
1373
1374         spin_unlock_irq(&parent->power.lock);
1375 }
1376
1377 /**
1378  * __device_suspend_late - Execute a "late suspend" callback for given device.
1379  * @dev: Device to handle.
1380  * @state: PM transition of the system being carried out.
1381  * @async: If true, the device is being suspended asynchronously.
1382  *
1383  * Runtime PM is disabled for @dev while this function is being executed.
1384  */
1385 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1386 {
1387         pm_callback_t callback = NULL;
1388         const char *info = NULL;
1389         int error = 0;
1390
1391         TRACE_DEVICE(dev);
1392         TRACE_SUSPEND(0);
1393
1394         __pm_runtime_disable(dev, false);
1395
1396         dpm_wait_for_subordinate(dev, async);
1397
1398         if (async_error)
1399                 goto Complete;
1400
1401         if (pm_wakeup_pending()) {
1402                 async_error = -EBUSY;
1403                 goto Complete;
1404         }
1405
1406         if (dev->power.syscore || dev->power.direct_complete)
1407                 goto Complete;
1408
1409         if (dev->pm_domain) {
1410                 info = "late power domain ";
1411                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1412         } else if (dev->type && dev->type->pm) {
1413                 info = "late type ";
1414                 callback = pm_late_early_op(dev->type->pm, state);
1415         } else if (dev->class && dev->class->pm) {
1416                 info = "late class ";
1417                 callback = pm_late_early_op(dev->class->pm, state);
1418         } else if (dev->bus && dev->bus->pm) {
1419                 info = "late bus ";
1420                 callback = pm_late_early_op(dev->bus->pm, state);
1421         }
1422         if (callback)
1423                 goto Run;
1424
1425         if (dev_pm_skip_suspend(dev))
1426                 goto Skip;
1427
1428         if (dev->driver && dev->driver->pm) {
1429                 info = "late driver ";
1430                 callback = pm_late_early_op(dev->driver->pm, state);
1431         }
1432
1433 Run:
1434         error = dpm_run_callback(callback, dev, state, info);
1435         if (error) {
1436                 async_error = error;
1437                 goto Complete;
1438         }
1439         dpm_propagate_wakeup_to_parent(dev);
1440
1441 Skip:
1442         dev->power.is_late_suspended = true;
1443
1444 Complete:
1445         TRACE_SUSPEND(error);
1446         complete_all(&dev->power.completion);
1447         return error;
1448 }
1449
1450 static void async_suspend_late(void *data, async_cookie_t cookie)
1451 {
1452         struct device *dev = data;
1453         int error;
1454
1455         error = __device_suspend_late(dev, pm_transition, true);
1456         if (error) {
1457                 dpm_save_failed_dev(dev_name(dev));
1458                 pm_dev_err(dev, pm_transition, " async", error);
1459         }
1460         put_device(dev);
1461 }
1462
1463 static int device_suspend_late(struct device *dev)
1464 {
1465         if (dpm_async_fn(dev, async_suspend_late))
1466                 return 0;
1467
1468         return __device_suspend_late(dev, pm_transition, false);
1469 }
1470
1471 /**
1472  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1473  * @state: PM transition of the system being carried out.
1474  */
1475 int dpm_suspend_late(pm_message_t state)
1476 {
1477         ktime_t starttime = ktime_get();
1478         int error = 0;
1479
1480         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1481         wake_up_all_idle_cpus();
1482         mutex_lock(&dpm_list_mtx);
1483         pm_transition = state;
1484         async_error = 0;
1485
1486         while (!list_empty(&dpm_suspended_list)) {
1487                 struct device *dev = to_device(dpm_suspended_list.prev);
1488
1489                 get_device(dev);
1490
1491                 mutex_unlock(&dpm_list_mtx);
1492
1493                 error = device_suspend_late(dev);
1494
1495                 mutex_lock(&dpm_list_mtx);
1496
1497                 if (!list_empty(&dev->power.entry))
1498                         list_move(&dev->power.entry, &dpm_late_early_list);
1499
1500                 if (error) {
1501                         pm_dev_err(dev, state, " late", error);
1502                         dpm_save_failed_dev(dev_name(dev));
1503                 }
1504
1505                 mutex_unlock(&dpm_list_mtx);
1506
1507                 put_device(dev);
1508
1509                 mutex_lock(&dpm_list_mtx);
1510
1511                 if (error || async_error)
1512                         break;
1513         }
1514         mutex_unlock(&dpm_list_mtx);
1515         async_synchronize_full();
1516         if (!error)
1517                 error = async_error;
1518         if (error) {
1519                 suspend_stats.failed_suspend_late++;
1520                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1521                 dpm_resume_early(resume_event(state));
1522         }
1523         dpm_show_time(starttime, state, error, "late");
1524         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1525         return error;
1526 }
1527
1528 /**
1529  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1530  * @state: PM transition of the system being carried out.
1531  */
1532 int dpm_suspend_end(pm_message_t state)
1533 {
1534         ktime_t starttime = ktime_get();
1535         int error;
1536
1537         error = dpm_suspend_late(state);
1538         if (error)
1539                 goto out;
1540
1541         error = dpm_suspend_noirq(state);
1542         if (error)
1543                 dpm_resume_early(resume_event(state));
1544
1545 out:
1546         dpm_show_time(starttime, state, error, "end");
1547         return error;
1548 }
1549 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1550
1551 /**
1552  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1553  * @dev: Device to suspend.
1554  * @state: PM transition of the system being carried out.
1555  * @cb: Suspend callback to execute.
1556  * @info: string description of caller.
1557  */
1558 static int legacy_suspend(struct device *dev, pm_message_t state,
1559                           int (*cb)(struct device *dev, pm_message_t state),
1560                           const char *info)
1561 {
1562         int error;
1563         ktime_t calltime;
1564
1565         calltime = initcall_debug_start(dev, cb);
1566
1567         trace_device_pm_callback_start(dev, info, state.event);
1568         error = cb(dev, state);
1569         trace_device_pm_callback_end(dev, error);
1570         suspend_report_result(dev, cb, error);
1571
1572         initcall_debug_report(dev, calltime, cb, error);
1573
1574         return error;
1575 }
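
legacy_suspend() serves the old bus-level callbacks that still take a pm_message_t. For comparison with the dev_pm_ops-based callbacks above, a sketch of those legacy signatures (foo_* names hypothetical; new code should provide dev_pm_ops instead):

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical legacy callbacks, shown only for comparison. */
static int foo_legacy_suspend(struct device *dev, pm_message_t state)
{
	return 0;
}

static int foo_legacy_resume(struct device *dev)
{
	return 0;
}

/*
 * The legacy bus ->suspend()/->resume() hooks that legacy_suspend() above and
 * the "legacy bus " branch of device_resume() still invoke.
 */
static struct bus_type foo_bus = {
	.name    = "foo",
	.suspend = foo_legacy_suspend,
	.resume  = foo_legacy_resume,
};
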
1576
1577 static void dpm_clear_superiors_direct_complete(struct device *dev)
1578 {
1579         struct device_link *link;
1580         int idx;
1581
1582         if (dev->parent) {
1583                 spin_lock_irq(&dev->parent->power.lock);
1584                 dev->parent->power.direct_complete = false;
1585                 spin_unlock_irq(&dev->parent->power.lock);
1586         }
1587
1588         idx = device_links_read_lock();
1589
1590         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1591                 spin_lock_irq(&link->supplier->power.lock);
1592                 link->supplier->power.direct_complete = false;
1593                 spin_unlock_irq(&link->supplier->power.lock);
1594         }
1595
1596         device_links_read_unlock(idx);
1597 }
1598
1599 /**
1600  * __device_suspend - Execute "suspend" callbacks for given device.
1601  * @dev: Device to handle.
1602  * @state: PM transition of the system being carried out.
1603  * @async: If true, the device is being suspended asynchronously.
1604  */
1605 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1606 {
1607         pm_callback_t callback = NULL;
1608         const char *info = NULL;
1609         int error = 0;
1610         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1611
1612         TRACE_DEVICE(dev);
1613         TRACE_SUSPEND(0);
1614
1615         dpm_wait_for_subordinate(dev, async);
1616
1617         if (async_error) {
1618                 dev->power.direct_complete = false;
1619                 goto Complete;
1620         }
1621
1622         /*
1623          * Wait for possible runtime PM transitions of the device in progress
1624          * to complete and if there's a runtime resume request pending for it,
1625          * resume it before proceeding with invoking the system-wide suspend
1626          * callbacks for it.
1627          *
1628          * If the system-wide suspend callbacks below change the configuration
1629          * of the device, they must disable runtime PM for it or otherwise
1630          * ensure that its runtime-resume callbacks will not be confused by that
1631          * change in case they are invoked going forward.
1632          */
1633         pm_runtime_barrier(dev);
1634
1635         if (pm_wakeup_pending()) {
1636                 dev->power.direct_complete = false;
1637                 async_error = -EBUSY;
1638                 goto Complete;
1639         }
1640
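             /*
              * Devices marked as "syscore" are expected to be handled outside
              * of the regular suspend/resume phases (e.g. by syscore or
              * platform code), so the PM core skips their callbacks entirely.
              */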
1641         if (dev->power.syscore)
1642                 goto Complete;
1643
1644         /* Avoid direct_complete to let wakeup_path propagate. */
1645         if (device_may_wakeup(dev) || device_wakeup_path(dev))
1646                 dev->power.direct_complete = false;
1647
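             /*
              * A device eligible for direct_complete can be left in runtime
              * suspend, provided it is still runtime-suspended after runtime
              * PM has been disabled: disabling closes the race with a
              * concurrent runtime resume, and rechecking the status confirms
              * that the suspend callbacks below can be skipped.  Otherwise,
              * direct_complete is cleared and the full callbacks are run.
              */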
1648         if (dev->power.direct_complete) {
1649                 if (pm_runtime_status_suspended(dev)) {
1650                         pm_runtime_disable(dev);
1651                         if (pm_runtime_status_suspended(dev)) {
1652                                 pm_dev_dbg(dev, state, "direct-complete ");
1653                                 goto Complete;
1654                         }
1655
1656                         pm_runtime_enable(dev);
1657                 }
1658                 dev->power.direct_complete = false;
1659         }
1660
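             /*
              * The device must be resumed during the subsequent resume phases
              * unless its driver opted in with DPM_FLAG_MAY_SKIP_RESUME
              * (typically set via dev_pm_set_driver_flags()); later suspend
              * phases may still decide that it has to be resumed anyway.
              */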
1661         dev->power.may_skip_resume = true;
1662         dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1663
1664         dpm_watchdog_set(&wd, dev);
1665         device_lock(dev);
1666
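             /*
              * Look the suspend callback up in the usual order of preference:
              * PM domain, device type, class, bus, and finally the driver
              * itself if none of the above supplies one.
              */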
1667         if (dev->pm_domain) {
1668                 info = "power domain ";
1669                 callback = pm_op(&dev->pm_domain->ops, state);
1670                 goto Run;
1671         }
1672
1673         if (dev->type && dev->type->pm) {
1674                 info = "type ";
1675                 callback = pm_op(dev->type->pm, state);
1676                 goto Run;
1677         }
1678
1679         if (dev->class && dev->class->pm) {
1680                 info = "class ";
1681                 callback = pm_op(dev->class->pm, state);
1682                 goto Run;
1683         }
1684
1685         if (dev->bus) {
1686                 if (dev->bus->pm) {
1687                         info = "bus ";
1688                         callback = pm_op(dev->bus->pm, state);
1689                 } else if (dev->bus->suspend) {
1690                         pm_dev_dbg(dev, state, "legacy bus ");
1691                         error = legacy_suspend(dev, state, dev->bus->suspend,
1692                                                 "legacy bus ");
1693                         goto End;
1694                 }
1695         }
1696
1697  Run:
1698         if (!callback && dev->driver && dev->driver->pm) {
1699                 info = "driver ";
1700                 callback = pm_op(dev->driver->pm, state);
1701         }
1702
1703         error = dpm_run_callback(callback, dev, state, info);
1704
1705  End:
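             /*
              * On success, mark the device as suspended and propagate its
              * wakeup path and direct_complete constraints to its parent and
              * suppliers.
              */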
1706         if (!error) {
1707                 dev->power.is_suspended = true;
1708                 if (device_may_wakeup(dev))
1709                         dev->power.wakeup_path = true;
1710
1711                 dpm_propagate_wakeup_to_parent(dev);
1712                 dpm_clear_superiors_direct_complete(dev);
1713         }
1714
1715         device_unlock(dev);
1716         dpm_watchdog_clear(&wd);
1717
1718  Complete:
1719         if (error)
1720                 async_error = error;
1721
1722         complete_all(&dev->power.completion);
1723         TRACE_SUSPEND(error);
1724         return error;
1725 }
1726
1727 static void async_suspend(void *data, async_cookie_t cookie)
1728 {
1729         struct device *dev = data;
1730         int error;
1731
1732         error = __device_suspend(dev, pm_transition, true);
1733         if (error) {
1734                 dpm_save_failed_dev(dev_name(dev));
1735                 pm_dev_err(dev, pm_transition, " async", error);
1736         }
1737
1738         put_device(dev);
1739 }
1740
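     /*
      * Suspend the device asynchronously if that is enabled for it; otherwise
      * suspend it synchronously here.
      */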
1741 static int device_suspend(struct device *dev)
1742 {
1743         if (dpm_async_fn(dev, async_suspend))
1744                 return 0;
1745
1746         return __device_suspend(dev, pm_transition, false);
1747 }
1748
1749 /**
1750  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1751  * @state: PM transition of the system being carried out.
1752  */
1753 int dpm_suspend(pm_message_t state)
1754 {
1755         ktime_t starttime = ktime_get();
1756         int error = 0;
1757
1758         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1759         might_sleep();
1760
1761         devfreq_suspend();
1762         cpufreq_suspend();
1763
1764         mutex_lock(&dpm_list_mtx);
1765         pm_transition = state;
1766         async_error = 0;
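             /*
              * Suspend devices from the tail of dpm_prepared_list, so that
              * children are handled before their parents; dpm_list_mtx is
              * dropped while each device's callbacks run.
              */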
1767         while (!list_empty(&dpm_prepared_list)) {
1768                 struct device *dev = to_device(dpm_prepared_list.prev);
1769
1770                 get_device(dev);
1771
1772                 mutex_unlock(&dpm_list_mtx);
1773
1774                 error = device_suspend(dev);
1775
1776                 mutex_lock(&dpm_list_mtx);
1777
1778                 if (error) {
1779                         pm_dev_err(dev, state, "", error);
1780                         dpm_save_failed_dev(dev_name(dev));
1781                 } else if (!list_empty(&dev->power.entry)) {
1782                         list_move(&dev->power.entry, &dpm_suspended_list);
1783                 }
1784
1785                 mutex_unlock(&dpm_list_mtx);
1786
1787                 put_device(dev);
1788
1789                 mutex_lock(&dpm_list_mtx);
1790
1791                 if (error || async_error)
1792                         break;
1793         }
1794         mutex_unlock(&dpm_list_mtx);
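             /* Wait for the asynchronous suspends still in flight to finish. */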
1795         async_synchronize_full();
1796         if (!error)
1797                 error = async_error;
1798         if (error) {
1799                 suspend_stats.failed_suspend++;
1800                 dpm_save_failed_step(SUSPEND_SUSPEND);
1801         }
1802         dpm_show_time(starttime, state, error, NULL);
1803         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1804         return error;
1805 }
1806
1807 /**
1808  * device_prepare - Prepare a device for system power transition.
1809  * @dev: Device to handle.
1810  * @state: PM transition of the system being carried out.
1811  *
1812  * Execute the ->prepare() callback(s) for the given device.  No new children of the
1813  * device may be registered after this function has returned.
1814  */
1815 static int device_prepare(struct device *dev, pm_message_t state)
1816 {
1817         int (*callback)(struct device *) = NULL;
1818         int ret = 0;
1819
1820         /*
1821          * If a device's parent goes into runtime suspend at the wrong time,
1822          * it won't be possible to resume the device.  To prevent this we
1823          * block runtime suspend here, during the prepare phase, and allow
1824          * it again during the complete phase.
1825          */
1826         pm_runtime_get_noresume(dev);
1827
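             /* Syscore devices take no part in these phases; nothing to prepare. */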
1828         if (dev->power.syscore)
1829                 return 0;
1830
1831         device_lock(dev);
1832
1833         dev->power.wakeup_path = false;
1834
1835         if (dev->power.no_pm_callbacks)
1836                 goto unlock;
1837
1838         if (dev->pm_domain)
1839                 callback = dev->pm_domain->ops.prepare;
1840         else if (dev->type && dev->type->pm)
1841                 callback = dev->type->pm->prepare;
1842         else if (dev->class && dev->class->pm)
1843                 callback = dev->class->pm->prepare;
1844         else if (dev->bus && dev->bus->pm)
1845                 callback = dev->bus->pm->prepare;
1846
1847         if (!callback && dev->driver && dev->driver->pm)
1848                 callback = dev->driver->pm->prepare;
1849
1850         if (callback)
1851                 ret = callback(dev);
1852
1853 unlock:
1854         device_unlock(dev);
1855
1856         if (ret < 0) {
1857                 suspend_report_result(dev, callback, ret);
1858                 pm_runtime_put(dev);
1859                 return ret;
1860         }
1861         /*
1862          * A positive return value from ->prepare() means "this device appears
1863          * to be runtime-suspended and its state is fine, so if it really is
1864          * runtime-suspended, you can leave it in that state provided that you
1865          * will do the same thing with all of its descendants".  This only
1866          * applies to suspend transitions, however.
1867          */
1868         spin_lock_irq(&dev->power.lock);
1869         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1870                 (ret > 0 || dev->power.no_pm_callbacks) &&
1871                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1872         spin_unlock_irq(&dev->power.lock);
1873         return 0;
1874 }
1875
1876 /**
1877  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1878  * @state: PM transition of the system being carried out.
1879  *
1880  * Execute the ->prepare() callback(s) for all devices.
1881  */
1882 int dpm_prepare(pm_message_t state)
1883 {
1884         int error = 0;
1885
1886         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1887         might_sleep();
1888
1889         /*
1890          * Give the known devices a chance to complete their probes before
1891          * probing is disabled.  This sync point is important at least at
1892          * boot time and during hibernation restore.
1893          */
1894         wait_for_device_probe();
1895         /*
1896          * Probing devices during suspend or hibernation is unsafe and would
1897          * make system behavior unpredictable, so prohibit device probing
1898          * here and defer any probes instead.  The normal behavior will be
1899          * restored in dpm_complete().
1900          */
1901         device_block_probing();
1902
1903         mutex_lock(&dpm_list_mtx);
1904         while (!list_empty(&dpm_list) && !error) {
1905                 struct device *dev = to_device(dpm_list.next);
1906
1907                 get_device(dev);
1908
1909                 mutex_unlock(&dpm_list_mtx);
1910
1911                 trace_device_pm_callback_start(dev, "", state.event);
1912                 error = device_prepare(dev, state);
1913                 trace_device_pm_callback_end(dev, error);
1914
1915                 mutex_lock(&dpm_list_mtx);
1916
1917                 if (!error) {
1918                         dev->power.is_prepared = true;
1919                         if (!list_empty(&dev->power.entry))
1920                                 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1921                 } else if (error == -EAGAIN) {
1922                         error = 0;
1923                 } else {
1924                         dev_info(dev, "not prepared for power transition: code %d\n",
1925                                  error);
1926                 }
1927
1928                 mutex_unlock(&dpm_list_mtx);
1929
1930                 put_device(dev);
1931
1932                 mutex_lock(&dpm_list_mtx);
1933         }
1934         mutex_unlock(&dpm_list_mtx);
1935         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1936         return error;
1937 }
1938
1939 /**
1940  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1941  * @state: PM transition of the system being carried out.
1942  *
1943  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1944  * callbacks for them.
1945  */
1946 int dpm_suspend_start(pm_message_t state)
1947 {
1948         ktime_t starttime = ktime_get();
1949         int error;
1950
1951         error = dpm_prepare(state);
1952         if (error) {
1953                 suspend_stats.failed_prepare++;
1954                 dpm_save_failed_step(SUSPEND_PREPARE);
1955         } else
1956                 error = dpm_suspend(state);
1957         dpm_show_time(starttime, state, error, "start");
1958         return error;
1959 }
1960 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1961
1962 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1963 {
1964         if (ret)
1965                 dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
1966 }
1967 EXPORT_SYMBOL_GPL(__suspend_report_result);
1968
1969 /**
1970  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1971  * @subordinate: Device that needs to wait for @dev.
1972  * @dev: Device to wait for.
1973  */
1974 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1975 {
1976         dpm_wait(dev, subordinate->power.async_suspend);
1977         return async_error;
1978 }
1979 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1980
1981 /**
1982  * dpm_for_each_dev - device iterator.
1983  * @data: data for the callback.
1984  * @fn: function to be called for each device.
1985  *
1986  * Iterate over devices in dpm_list, and call @fn for each device,
1987  * passing it @data.
1988  */
1989 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1990 {
1991         struct device *dev;
1992
1993         if (!fn)
1994                 return;
1995
1996         device_pm_lock();
1997         list_for_each_entry(dev, &dpm_list, power.entry)
1998                 fn(dev, data);
1999         device_pm_unlock();
2000 }
2001 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2002
2003 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2004 {
2005         if (!ops)
2006                 return true;
2007
2008         return !ops->prepare &&
2009                !ops->suspend &&
2010                !ops->suspend_late &&
2011                !ops->suspend_noirq &&
2012                !ops->resume_noirq &&
2013                !ops->resume_early &&
2014                !ops->resume &&
2015                !ops->complete;
2016 }
2017
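     /*
      * Cache whether the device has any PM callbacks at all.  Devices without
      * callbacks can be skipped cheaply during the suspend/resume phases and
      * are eligible for the direct_complete optimization.
      */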
2018 void device_pm_check_callbacks(struct device *dev)
2019 {
2020         unsigned long flags;
2021
2022         spin_lock_irqsave(&dev->power.lock, flags);
2023         dev->power.no_pm_callbacks =
2024                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2025                  !dev->bus->suspend && !dev->bus->resume)) &&
2026                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2027                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2028                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2029                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2030                  !dev->driver->suspend && !dev->driver->resume));
2031         spin_unlock_irqrestore(&dev->power.lock, flags);
2032 }
2033
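     /*
      * With DPM_FLAG_SMART_SUSPEND set (a driver would typically set it via
      * dev_pm_set_driver_flags() at probe time), a device that is already
      * runtime-suspended may have some of its system suspend callbacks
      * skipped; helpers like the one below let middle-layer code check for
      * that case.
      */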
2034 bool dev_pm_skip_suspend(struct device *dev)
2035 {
2036         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2037                 pm_runtime_status_suspended(dev);
2038 }