1 // SPDX-License-Identifier: GPL-2.0-only
3 * kernel/power/main.c - PM subsystem core functionality.
5 * Copyright (c) 2003 Patrick Mochel
6 * Copyright (c) 2003 Open Source Development Lab
9 #include <linux/export.h>
10 #include <linux/kobject.h>
11 #include <linux/string.h>
12 #include <linux/pm-trace.h>
13 #include <linux/workqueue.h>
14 #include <linux/debugfs.h>
15 #include <linux/seq_file.h>
16 #include <linux/suspend.h>
17 #include <linux/syscalls.h>
18 #include <linux/pm_runtime.h>
22 #ifdef CONFIG_PM_SLEEP
/*
 * lock_system_sleep - serialize the caller against system sleep transitions.
 *
 * PF_FREEZER_SKIP is set so the freezer does not wait for this task while
 * it blocks on system_transition_mutex; holding the mutex keeps
 * suspend/hibernate from starting until unlock_system_sleep().
 * NOTE(review): braces elided in this excerpt.
 */
24 void lock_system_sleep(void)
26 current->flags |= PF_FREEZER_SKIP;
27 mutex_lock(&system_transition_mutex);
29 EXPORT_SYMBOL_GPL(lock_system_sleep);
/*
 * unlock_system_sleep - release the lock taken by lock_system_sleep().
 *
 * Clears PF_FREEZER_SKIP by hand instead of via freezer_count(); the
 * existing comment below explains why try_to_freeze() must not be
 * called here.
 */
31 void unlock_system_sleep(void)
34 * Don't use freezer_count() because we don't want the call to
35 * try_to_freeze() here.
38 * Fundamentally, we just don't need it, because freezing condition
39 * doesn't come into effect until we release the
40 * system_transition_mutex lock, since the freezer always works with
41 * system_transition_mutex held.
43 * More importantly, in the case of hibernation,
44 * unlock_system_sleep() gets called in snapshot_read() and
45 * snapshot_write() when the freezing condition is still in effect.
46 * Which means, if we use try_to_freeze() here, it would make them
47 * enter the refrigerator, thus causing hibernation to lockup.
49 current->flags &= ~PF_FREEZER_SKIP;
50 mutex_unlock(&system_transition_mutex);
52 EXPORT_SYMBOL_GPL(unlock_system_sleep);
/*
 * ksys_sync_helper - sync filesystems and log how long it took.
 *
 * Reports elapsed time in seconds.milliseconds via pr_info().
 * NOTE(review): the local declarations, the sync call itself and the
 * start-timestamp lines are elided in this excerpt.
 */
54 void ksys_sync_helper(void)
61 elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
62 pr_info("Filesystems sync: %ld.%03ld seconds\n",
63 elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
65 EXPORT_SYMBOL_GPL(ksys_sync_helper);
67 /* Routines for PM-transition notifications */
/* Blocking notifier chain for PM transition events. */
69 static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
/* Add @nb to the PM transition notifier chain. */
71 int register_pm_notifier(struct notifier_block *nb)
73 return blocking_notifier_chain_register(&pm_chain_head, nb);
75 EXPORT_SYMBOL_GPL(register_pm_notifier);
/* Remove @nb from the PM transition notifier chain. */
77 int unregister_pm_notifier(struct notifier_block *nb)
79 return blocking_notifier_chain_unregister(&pm_chain_head, nb);
81 EXPORT_SYMBOL_GPL(unregister_pm_notifier);
/*
 * Notify up to @nr_to_call chain entries of event @val; the number of
 * callbacks actually made is stored through @nr_calls when non-NULL.
 * The notifier return value is converted to 0 / -errno.
 */
83 int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
87 ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
88 nr_to_call, nr_calls);
90 return notifier_to_errno(ret);
/* Notify the whole chain: -1 means no limit on calls (chain convention). */
92 int pm_notifier_call_chain(unsigned long val)
94 return __pm_notifier_call_chain(val, -1, NULL);
97 /* If set, devices may be suspended and resumed asynchronously. */
98 int pm_async_enabled = 1;
/* /sys/power/pm_async reader: prints the current flag as 0 or 1. */
100 static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
103 return sprintf(buf, "%d\n", pm_async_enabled);
/*
 * /sys/power/pm_async writer: parses a decimal value and updates the
 * flag.  NOTE(review): the range check and return lines are elided in
 * this excerpt.
 */
106 static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
107 const char *buf, size_t n)
111 if (kstrtoul(buf, 10, &val))
117 pm_async_enabled = val;
121 power_attr(pm_async);
123 #ifdef CONFIG_SUSPEND
/*
 * /sys/power/mem_sleep reader: list every available memory-sleep state
 * label, marking the currently selected one with [brackets].
 */
124 static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
130 for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
131 if (mem_sleep_states[i]) {
132 const char *label = mem_sleep_states[i];
134 if (mem_sleep_current == i)
135 s += sprintf(s, "[%s] ", label);
137 s += sprintf(s, "%s ", label);
140 /* Convert the last space to a newline if needed. */
/*
 * Map a user-supplied label (optionally newline-terminated) to a
 * suspend_state_t; returns PM_SUSPEND_ON when nothing matches.
 */
147 static suspend_state_t decode_suspend_state(const char *buf, size_t n)
149 suspend_state_t state;
/* Compare only up to a trailing newline, if any. */
153 p = memchr(buf, '\n', n);
154 len = p ? p - buf : n;
156 for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
157 const char *label = mem_sleep_states[state];
159 if (label && len == strlen(label) && !strncmp(buf, label, len))
163 return PM_SUSPEND_ON;
/*
 * /sys/power/mem_sleep writer: select the default "mem" sleep state.
 * Rejected while autosleep is active (pm_autosleep_state() above ON).
 */
166 static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
167 const char *buf, size_t n)
169 suspend_state_t state;
172 error = pm_autosleep_lock();
176 if (pm_autosleep_state() > PM_SUSPEND_ON) {
181 state = decode_suspend_state(buf, n);
/* Only accept real sleep states, not ON or the MAX sentinel. */
182 if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)
183 mem_sleep_current = state;
188 pm_autosleep_unlock();
189 return error ? error : n;
192 power_attr(mem_sleep);
193 #endif /* CONFIG_SUSPEND */
195 #ifdef CONFIG_PM_SLEEP_DEBUG
/* Currently selected suspend test level (see pm_tests[] labels below). */
196 int pm_test_level = TEST_NONE;
/* User-visible labels for each pm_test level, indexed by level. */
198 static const char * const pm_tests[__TEST_AFTER_LAST] = {
199 [TEST_NONE] = "none",
200 [TEST_CORE] = "core",
201 [TEST_CPUS] = "processors",
202 [TEST_PLATFORM] = "platform",
203 [TEST_DEVICES] = "devices",
204 [TEST_FREEZER] = "freezer",
/*
 * /sys/power/pm_test reader: list all test levels, bracketing the
 * active one.
 */
207 static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
213 for (level = TEST_FIRST; level <= TEST_MAX; level++)
214 if (pm_tests[level]) {
215 if (level == pm_test_level)
216 s += sprintf(s, "[%s] ", pm_tests[level]);
218 s += sprintf(s, "%s ", pm_tests[level]);
222 /* convert the last space to a newline */
/*
 * /sys/power/pm_test writer: match the (newline-trimmed) input against
 * pm_tests[] and set pm_test_level under the system-sleep lock.
 * NOTE(review): the lock_system_sleep() call and loop-start lines are
 * elided in this excerpt.
 */
228 static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
229 const char *buf, size_t n)
231 const char * const *s;
237 p = memchr(buf, '\n', n);
238 len = p ? p - buf : n;
243 for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
244 if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
245 pm_test_level = level;
250 unlock_system_sleep();
252 return error ? error : n;
256 #endif /* CONFIG_PM_SLEEP_DEBUG */
/*
 * Map a suspend_stat_step enum value to its printable name.
 * NOTE(review): the switch keyword, several return statements and the
 * default case are elided in this excerpt.
 */
258 static char *suspend_step_name(enum suspend_stat_step step)
263 case SUSPEND_PREPARE:
265 case SUSPEND_SUSPEND:
267 case SUSPEND_SUSPEND_NOIRQ:
268 return "suspend_noirq";
269 case SUSPEND_RESUME_NOIRQ:
270 return "resume_noirq";
/*
 * suspend_attr(_name) - define a read-only sysfs attribute that prints
 * the matching integer counter from the global suspend_stats struct.
 */
278 #define suspend_attr(_name) \
279 static ssize_t _name##_show(struct kobject *kobj, \
280 struct kobj_attribute *attr, char *buf) \
282 return sprintf(buf, "%d\n", suspend_stats._name); \
284 static struct kobj_attribute _name = __ATTR_RO(_name)
/* One attribute per suspend_stats counter, exposed under suspend_stats/. */
286 suspend_attr(success);
288 suspend_attr(failed_freeze);
289 suspend_attr(failed_prepare);
290 suspend_attr(failed_suspend);
291 suspend_attr(failed_suspend_late);
292 suspend_attr(failed_suspend_noirq);
293 suspend_attr(failed_resume);
294 suspend_attr(failed_resume_early);
295 suspend_attr(failed_resume_noirq);
/*
 * Show the name of the device that failed most recently.  failed_devs[]
 * is a ring buffer of REC_FAILED_NUM entries; the +REC_FAILED_NUM-1,
 * then modulo, steps back one slot from the next-write index.
 */
297 static ssize_t last_failed_dev_show(struct kobject *kobj,
298 struct kobj_attribute *attr, char *buf)
301 char *last_failed_dev = NULL;
303 index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
304 index %= REC_FAILED_NUM;
305 last_failed_dev = suspend_stats.failed_devs[index];
307 return sprintf(buf, "%s\n", last_failed_dev);
309 static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);
/*
 * Show the errno of the most recent failure, using the same
 * one-slot-back ring-buffer indexing as last_failed_dev_show().
 */
311 static ssize_t last_failed_errno_show(struct kobject *kobj,
312 struct kobj_attribute *attr, char *buf)
315 int last_failed_errno;
317 index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
318 index %= REC_FAILED_NUM;
319 last_failed_errno = suspend_stats.errno[index];
321 return sprintf(buf, "%d\n", last_failed_errno);
323 static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);
/*
 * Show the name of the suspend step that failed most recently,
 * translating the recorded enum via suspend_step_name().
 */
325 static ssize_t last_failed_step_show(struct kobject *kobj,
326 struct kobj_attribute *attr, char *buf)
329 enum suspend_stat_step step;
330 char *last_failed_step = NULL;
332 index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
333 index %= REC_FAILED_NUM;
334 step = suspend_stats.failed_steps[index];
335 last_failed_step = suspend_step_name(step);
337 return sprintf(buf, "%s\n", last_failed_step);
339 static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);
/*
 * All suspend statistics attributes, grouped under the "suspend_stats"
 * sysfs directory.  NOTE(review): a few entries (e.g. success, fail,
 * failed_resume) and the terminating NULL are elided in this excerpt.
 */
341 static struct attribute *suspend_attrs[] = {
345 &failed_prepare.attr,
346 &failed_suspend.attr,
347 &failed_suspend_late.attr,
348 &failed_suspend_noirq.attr,
350 &failed_resume_early.attr,
351 &failed_resume_noirq.attr,
352 &last_failed_dev.attr,
353 &last_failed_errno.attr,
354 &last_failed_step.attr,
358 static struct attribute_group suspend_attr_group = {
359 .name = "suspend_stats",
360 .attrs = suspend_attrs,
363 #ifdef CONFIG_DEBUG_FS
/*
 * debugfs view of suspend_stats: all counters followed by the ring
 * buffers of recent failing devices, errnos and steps, each printed
 * newest first (the loops walk the rings backwards from the last
 * failure slot).
 */
364 static int suspend_stats_show(struct seq_file *s, void *unused)
366 int i, index, last_dev, last_errno, last_step;
/* Step back one slot from each next-write index (ring buffers). */
368 last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
369 last_dev %= REC_FAILED_NUM;
370 last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
371 last_errno %= REC_FAILED_NUM;
372 last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
373 last_step %= REC_FAILED_NUM;
/* Plain counters first. */
374 seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
375 "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
376 "success", suspend_stats.success,
377 "fail", suspend_stats.fail,
378 "failed_freeze", suspend_stats.failed_freeze,
379 "failed_prepare", suspend_stats.failed_prepare,
380 "failed_suspend", suspend_stats.failed_suspend,
381 "failed_suspend_late",
382 suspend_stats.failed_suspend_late,
383 "failed_suspend_noirq",
384 suspend_stats.failed_suspend_noirq,
385 "failed_resume", suspend_stats.failed_resume,
386 "failed_resume_early",
387 suspend_stats.failed_resume_early,
388 "failed_resume_noirq",
389 suspend_stats.failed_resume_noirq);
/* Recent failing devices, newest first. */
390 seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
391 suspend_stats.failed_devs[last_dev]);
392 for (i = 1; i < REC_FAILED_NUM; i++) {
393 index = last_dev + REC_FAILED_NUM - i;
394 index %= REC_FAILED_NUM;
395 seq_printf(s, "\t\t\t%-s\n",
396 suspend_stats.failed_devs[index]);
/* Recent errnos, newest first. */
398 seq_printf(s, " last_failed_errno:\t%-d\n",
399 suspend_stats.errno[last_errno]);
400 for (i = 1; i < REC_FAILED_NUM; i++) {
401 index = last_errno + REC_FAILED_NUM - i;
402 index %= REC_FAILED_NUM;
403 seq_printf(s, "\t\t\t%-d\n",
404 suspend_stats.errno[index]);
/* Recent failing steps, newest first (names via suspend_step_name()). */
406 seq_printf(s, " last_failed_step:\t%-s\n",
408 suspend_stats.failed_steps[last_step]));
409 for (i = 1; i < REC_FAILED_NUM; i++) {
410 index = last_step + REC_FAILED_NUM - i;
411 index %= REC_FAILED_NUM;
412 seq_printf(s, "\t\t\t%-s\n",
414 suspend_stats.failed_steps[index]));
419 DEFINE_SHOW_ATTRIBUTE(suspend_stats);
/*
 * Create /sys/kernel/debug/suspend_stats (read-only for everyone).
 * debugfs_create_file() errors are deliberately ignored: debugfs is
 * best-effort.
 */
421 static int __init pm_debugfs_init(void)
423 debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
424 NULL, NULL, &suspend_stats_fops);
428 late_initcall(pm_debugfs_init);
429 #endif /* CONFIG_DEBUG_FS */
431 #endif /* CONFIG_PM_SLEEP */
433 #ifdef CONFIG_PM_SLEEP_DEBUG
435 * pm_print_times: print time taken by devices to suspend and resume.
437 * show() returns whether printing of suspend and resume times is enabled.
438 * store() accepts 0 or 1. 0 disables printing and 1 enables it.
/* Whether per-device suspend/resume times are printed (see comment above). */
440 bool pm_print_times_enabled;
/* /sys/power/pm_print_times reader: prints the flag as 0 or 1. */
442 static ssize_t pm_print_times_show(struct kobject *kobj,
443 struct kobj_attribute *attr, char *buf)
445 return sprintf(buf, "%d\n", pm_print_times_enabled);
/*
 * /sys/power/pm_print_times writer: accepts 0 or 1; !!val normalizes
 * to bool.  NOTE(review): the range check and return lines are elided
 * in this excerpt.
 */
448 static ssize_t pm_print_times_store(struct kobject *kobj,
449 struct kobj_attribute *attr,
450 const char *buf, size_t n)
454 if (kstrtoul(buf, 10, &val))
460 pm_print_times_enabled = !!val;
464 power_attr(pm_print_times);
/* Default pm_print_times to the boot-time initcall_debug setting. */
466 static inline void pm_print_times_init(void)
468 pm_print_times_enabled = !!initcall_debug;
/*
 * /sys/power/pm_wakeup_irq reader: print the IRQ number that last woke
 * the system, or an error when none is recorded (pm_wakeup_irq() == 0;
 * the elided branch presumably returns a negative errno — TODO confirm).
 */
471 static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
472 struct kobj_attribute *attr,
475 if (!pm_wakeup_irq())
478 return sprintf(buf, "%u\n", pm_wakeup_irq());
481 power_attr_ro(pm_wakeup_irq);
/* Gate for __pm_pr_dbg() output; toggled via /sys/power/pm_debug_messages. */
483 bool pm_debug_messages_on __read_mostly;
/* /sys/power/pm_debug_messages reader: prints the flag as 0 or 1. */
485 static ssize_t pm_debug_messages_show(struct kobject *kobj,
486 struct kobj_attribute *attr, char *buf)
488 return sprintf(buf, "%d\n", pm_debug_messages_on);
/*
 * /sys/power/pm_debug_messages writer: accepts 0 or 1; !!val normalizes
 * to bool.  NOTE(review): the range check and return lines are elided
 * in this excerpt.
 */
491 static ssize_t pm_debug_messages_store(struct kobject *kobj,
492 struct kobj_attribute *attr,
493 const char *buf, size_t n)
497 if (kstrtoul(buf, 10, &val))
503 pm_debug_messages_on = !!val;
507 power_attr(pm_debug_messages);
510 * __pm_pr_dbg - Print a suspend debug message to the kernel log.
511 * @defer: Whether or not to use printk_deferred() to print the message.
512 * @fmt: Message format.
514 * The message will be emitted if enabled through the pm_debug_messages
517 void __pm_pr_dbg(bool defer, const char *fmt, ...)
519 struct va_format vaf;
/* Silently drop the message unless debugging output is enabled. */
522 if (!pm_debug_messages_on)
/* Deferred path for contexts where a direct printk is unsafe. */
531 printk_deferred(KERN_DEBUG "PM: %pV", &vaf);
533 printk(KERN_DEBUG "PM: %pV", &vaf);
538 #else /* !CONFIG_PM_SLEEP_DEBUG */
/* No-op stub when CONFIG_PM_SLEEP_DEBUG is disabled. */
539 static inline void pm_print_times_init(void) {}
540 #endif /* CONFIG_PM_SLEEP_DEBUG */
/* Kobject backing the /sys/power directory; created in pm_init(). */
542 struct kobject *power_kobj;
545 * state - control system sleep states.
547 * show() returns available sleep state labels, which may be "mem", "standby",
548 * "freeze" and "disk" (hibernation).
549 * See Documentation/admin-guide/pm/sleep-states.rst for a description of
552 * store() accepts one of those strings, translates it into the proper
553 * enumerated value, and initiates a suspend transition.
/*
 * /sys/power/state reader: list the supported suspend state labels and,
 * when hibernation is available, "disk".
 */
555 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
559 #ifdef CONFIG_SUSPEND
562 for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
564 s += sprintf(s,"%s ", pm_states[i]);
567 if (hibernation_available())
568 s += sprintf(s, "disk ");
570 /* convert the last space to a newline */
/*
 * Map a user-supplied state label (optionally newline-terminated) to a
 * suspend_state_t: PM_SUSPEND_MAX means hibernation ("disk"), a match
 * in pm_states[] means that suspend state, PM_SUSPEND_ON means no match.
 */
575 static suspend_state_t decode_state(const char *buf, size_t n)
577 #ifdef CONFIG_SUSPEND
578 suspend_state_t state;
/* Compare only up to a trailing newline, if any. */
583 p = memchr(buf, '\n', n);
584 len = p ? p - buf : n;
586 /* Check hibernation first. */
587 if (len == 4 && str_has_prefix(buf, "disk"))
588 return PM_SUSPEND_MAX;
590 #ifdef CONFIG_SUSPEND
591 for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
592 const char *label = pm_states[state];
594 if (label && len == strlen(label) && !strncmp(buf, label, len))
599 return PM_SUSPEND_ON;
/*
 * /sys/power/state writer: decode the label and start the transition —
 * pm_suspend() for a suspend state (with "mem" redirected to the
 * mem_sleep_current selection), hibernation for PM_SUSPEND_MAX.
 * Rejected while autosleep is active.
 */
602 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
603 const char *buf, size_t n)
605 suspend_state_t state;
608 error = pm_autosleep_lock();
612 if (pm_autosleep_state() > PM_SUSPEND_ON) {
617 state = decode_state(buf, n);
618 if (state < PM_SUSPEND_MAX) {
/* "mem" is an alias for whatever mem_sleep currently selects. */
619 if (state == PM_SUSPEND_MEM)
620 state = mem_sleep_current;
622 error = pm_suspend(state);
623 } else if (state == PM_SUSPEND_MAX) {
630 pm_autosleep_unlock();
631 return error ? error : n;
636 #ifdef CONFIG_PM_SLEEP
638 * The 'wakeup_count' attribute, along with the functions defined in
639 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
640 * handled in a non-racy way.
642 * If a wakeup event occurs when the system is in a sleep state, it simply is
643 * woken up. In turn, if an event that would wake the system up from a sleep
644 * state occurs when it is undergoing a transition to that sleep state, the
645 * transition should be aborted. Moreover, if such an event occurs when the
646 * system is in the working state, an attempt to start a transition to the
647 * given sleep state should fail during certain period after the detection of
648 * the event. Using the 'state' attribute alone is not sufficient to satisfy
649 * these requirements, because a wakeup event may occur exactly when 'state'
650 * is being written to and may be delivered to user space right before it is
651 * frozen, so the event will remain only partially processed until the system is
652 * woken up by another event. In particular, it won't cause the transition to
653 * a sleep state to be aborted.
655 * This difficulty may be overcome if user space uses 'wakeup_count' before
656 * writing to 'state'. It first should read from 'wakeup_count' and store
657 * the read value. Then, after carrying out its own preparations for the system
658 * transition to a sleep state, it should write the stored value to
659 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
660 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
661 * is allowed to write to 'state', but the transition will be aborted if there
662 * are any wakeup events detected after 'wakeup_count' was written to.
/*
 * /sys/power/wakeup_count reader: report the current wakeup event count,
 * or -EINTR if pm_get_wakeup_count() was interrupted (see the protocol
 * described in the comment above).
 */
665 static ssize_t wakeup_count_show(struct kobject *kobj,
666 struct kobj_attribute *attr,
671 return pm_get_wakeup_count(&val, true) ?
672 sprintf(buf, "%u\n", val) : -EINTR;
/*
 * /sys/power/wakeup_count writer: store back a previously read count;
 * pm_save_wakeup_count() fails if wakeup events occurred in between, in
 * which case the active wakeup sources are logged.  Rejected while
 * autosleep is active.
 */
675 static ssize_t wakeup_count_store(struct kobject *kobj,
676 struct kobj_attribute *attr,
677 const char *buf, size_t n)
682 error = pm_autosleep_lock();
686 if (pm_autosleep_state() > PM_SUSPEND_ON) {
692 if (sscanf(buf, "%u", &val) == 1) {
693 if (pm_save_wakeup_count(val))
696 pm_print_active_wakeup_sources();
700 pm_autosleep_unlock();
704 power_attr(wakeup_count);
706 #ifdef CONFIG_PM_AUTOSLEEP
/*
 * /sys/power/autosleep reader: "off" when autosleep is disabled, the
 * suspend state label when one is set, "disk" for hibernation, or
 * "error" as a fallback.
 */
707 static ssize_t autosleep_show(struct kobject *kobj,
708 struct kobj_attribute *attr,
711 suspend_state_t state = pm_autosleep_state();
713 if (state == PM_SUSPEND_ON)
714 return sprintf(buf, "off\n");
716 #ifdef CONFIG_SUSPEND
717 if (state < PM_SUSPEND_MAX)
718 return sprintf(buf, "%s\n", pm_states[state] ?
719 pm_states[state] : "error");
721 #ifdef CONFIG_HIBERNATION
722 return sprintf(buf, "disk\n");
724 return sprintf(buf, "error");
/*
 * /sys/power/autosleep writer: "off" disables autosleep; any other
 * input must decode to a valid state (PM_SUSPEND_ON from decode_state()
 * that is not literally "off" is rejected).  "mem" is redirected to the
 * mem_sleep_current selection.
 */
728 static ssize_t autosleep_store(struct kobject *kobj,
729 struct kobj_attribute *attr,
730 const char *buf, size_t n)
732 suspend_state_t state = decode_state(buf, n);
735 if (state == PM_SUSPEND_ON
736 && strcmp(buf, "off") && strcmp(buf, "off\n"))
739 if (state == PM_SUSPEND_MEM)
740 state = mem_sleep_current;
742 error = pm_autosleep_set_state(state);
743 return error ? error : n;
746 power_attr(autosleep);
747 #endif /* CONFIG_PM_AUTOSLEEP */
749 #ifdef CONFIG_PM_WAKELOCKS
/* /sys/power/wake_lock reader: list the currently held wakelocks. */
750 static ssize_t wake_lock_show(struct kobject *kobj,
751 struct kobj_attribute *attr,
754 return pm_show_wakelocks(buf, true);
/* /sys/power/wake_lock writer: acquire the named wakelock. */
757 static ssize_t wake_lock_store(struct kobject *kobj,
758 struct kobj_attribute *attr,
759 const char *buf, size_t n)
761 int error = pm_wake_lock(buf);
762 return error ? error : n;
765 power_attr(wake_lock);
/* /sys/power/wake_unlock reader: list the currently inactive wakelocks. */
767 static ssize_t wake_unlock_show(struct kobject *kobj,
768 struct kobj_attribute *attr,
771 return pm_show_wakelocks(buf, false);
/* /sys/power/wake_unlock writer: release the named wakelock. */
774 static ssize_t wake_unlock_store(struct kobject *kobj,
775 struct kobj_attribute *attr,
776 const char *buf, size_t n)
778 int error = pm_wake_unlock(buf);
779 return error ? error : n;
782 power_attr(wake_unlock);
784 #endif /* CONFIG_PM_WAKELOCKS */
785 #endif /* CONFIG_PM_SLEEP */
787 #ifdef CONFIG_PM_TRACE
/* Whether PM trace (RTC-clobbering resume debugging) is enabled. */
788 int pm_trace_enabled;
/* /sys/power/pm_trace reader: prints the flag as 0 or 1. */
790 static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
793 return sprintf(buf, "%d\n", pm_trace_enabled);
/*
 * /sys/power/pm_trace writer: set the flag and warn that enabling it
 * will corrupt the system clock across resume.
 */
797 pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
798 const char *buf, size_t n)
802 if (sscanf(buf, "%d", &val) == 1) {
803 pm_trace_enabled = !!val;
804 if (pm_trace_enabled) {
805 pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
806 "PM: Correct system time has to be restored manually after resume.\n");
813 power_attr(pm_trace);
/* /sys/power/pm_trace_dev_match reader: devices matching the trace hash. */
815 static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
816 struct kobj_attribute *attr,
819 return show_trace_dev_match(buf, PAGE_SIZE);
822 power_attr_ro(pm_trace_dev_match);
824 #endif /* CONFIG_PM_TRACE */
826 #ifdef CONFIG_FREEZER
/* /sys/power/pm_freeze_timeout reader: freezer timeout in milliseconds. */
827 static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
828 struct kobj_attribute *attr, char *buf)
830 return sprintf(buf, "%u\n", freeze_timeout_msecs);
/*
 * /sys/power/pm_freeze_timeout writer: parse a decimal millisecond
 * value and update freeze_timeout_msecs.
 */
833 static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
834 struct kobj_attribute *attr,
835 const char *buf, size_t n)
839 if (kstrtoul(buf, 10, &val))
842 freeze_timeout_msecs = val;
846 power_attr(pm_freeze_timeout);
848 #endif /* CONFIG_FREEZER*/
/*
 * Top-level /sys/power attributes; entries are compiled in only when
 * the matching config option is set.  NOTE(review): several entries,
 * the #endif lines and the terminating NULL are elided in this excerpt.
 */
850 static struct attribute * g[] = {
852 #ifdef CONFIG_PM_TRACE
854 &pm_trace_dev_match_attr.attr,
856 #ifdef CONFIG_PM_SLEEP
858 &wakeup_count_attr.attr,
859 #ifdef CONFIG_SUSPEND
860 &mem_sleep_attr.attr,
862 #ifdef CONFIG_PM_AUTOSLEEP
863 &autosleep_attr.attr,
865 #ifdef CONFIG_PM_WAKELOCKS
866 &wake_lock_attr.attr,
867 &wake_unlock_attr.attr,
869 #ifdef CONFIG_PM_SLEEP_DEBUG
871 &pm_print_times_attr.attr,
872 &pm_wakeup_irq_attr.attr,
873 &pm_debug_messages_attr.attr,
876 #ifdef CONFIG_FREEZER
877 &pm_freeze_timeout_attr.attr,
/* Group wrapping g[], registered on power_kobj in pm_init(). */
882 static const struct attribute_group attr_group = {
/* All groups passed to sysfs_create_groups() in pm_init(). */
886 static const struct attribute_group *attr_groups[] = {
888 #ifdef CONFIG_PM_SLEEP
/* Freezable workqueue shared by PM code ("pm"). */
894 struct workqueue_struct *pm_wq;
895 EXPORT_SYMBOL_GPL(pm_wq);
/* Allocate pm_wq; WQ_FREEZABLE so queued work pauses across suspend. */
897 static int __init pm_start_workqueue(void)
899 pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
901 return pm_wq ? 0 : -ENOMEM;
/*
 * PM core initialization: create pm_wq, size hibernation reserves,
 * create the /sys/power kobject and its attribute groups, seed the
 * pm_print_times default, then initialize autosleep.
 * NOTE(review): the intermediate error-check lines are elided in this
 * excerpt.
 */
904 static int __init pm_init(void)
906 int error = pm_start_workqueue();
909 hibernate_image_size_init();
910 hibernate_reserved_size_init();
912 power_kobj = kobject_create_and_add("power", NULL);
915 error = sysfs_create_groups(power_kobj, attr_groups);
918 pm_print_times_init();
919 return pm_autosleep_init();
922 core_initcall(pm_init);