/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)

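/*
 * Illustrative sketch (not part of this file): a provider that needs to
 * power its domain on/off from atomic context sets GENPD_FLAG_IRQ_SAFE
 * before pm_genpd_init(), which makes genpd_lock_init() below select the
 * spinlock-based genpd_spin_ops instead of genpd_mtx_ops. The names used
 * here are hypothetical:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.flags = GENPD_FLAG_IRQ_SAFE,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, false);
 */
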
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain, as
	 * to indicate a suboptimal configuration for PM. For an always on
	 * domain this isn't the case, thus don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GPD_STATE_ACTIVE) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it.  In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd_update_accounting(genpd);

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

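/*
 * Illustrative note (not from this file): passing "pd_ignore_unused" on the
 * kernel command line sets pd_ignore_unused above, so this late_initcall
 * skips queueing the power-off work and otherwise-unused domains stay
 * powered, e.g.:
 *
 *	... console=ttyS0,115200 pd_ignore_unused ...
 */
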
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

static bool genpd_dev_active_wakeup(const struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
		ret = pm_runtime_force_suspend(dev);
		if (ret)
			return ret;
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	return pm_generic_resume_noirq(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_suspend(dev);

	return ret;
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
		ret = pm_runtime_force_resume(dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * pm_genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
		ret = pm_runtime_force_resume(dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Specifies whether power is being removed (true) or restored
 * (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_poweroff_noirq		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_unlock(genpd);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, td);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(__pm_genpd_add_device);

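/*
 * Illustrative sketch (not part of this file): a platform driver that has
 * initialized a domain with pm_genpd_init() can hook a device up to it like
 * this; "my_pd" and "pdev" are hypothetical:
 *
 *	ret = __pm_genpd_add_device(&my_pd, &pdev->dev, NULL);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to join PM domain\n");
 *
 * Passing a NULL gpd_timing_data makes genpd_alloc_dev_data() above start
 * from zeroed timing data for the device.
 */
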
static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	if (!genpd || genpd != genpd_lookup_dev(dev))
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

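/*
 * Illustrative sketch (not part of this file): linking two initialized
 * domains so that "child_pd" becomes a subdomain of "parent_pd" (both
 * names hypothetical):
 *
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 *
 * After this, powering off parent_pd is refused while child_pd is still on,
 * via the sd_count bookkeeping maintained above.
 */
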
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free = state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = pm_genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.complete = pm_genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
		return -EINVAL;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			return ret;
	} else if (!gov) {
		pr_warn("%s : no governor for states\n", genpd->name);
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);

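/*
 * Illustrative sketch (not part of this file): minimal registration of a
 * domain with provider-specific power callbacks; all names below are
 * hypothetical:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		// program the power controller, return 0 on success
 *		return 0;
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);	// start powered off
 */
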
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	kfree(genpd->free);
	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (pm_genpd_present(genpd)) {
		ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
		if (!ret) {
			genpd->provider = &np->fwnode;
			genpd->has_provider = true;
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);

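/*
 * Illustrative sketch (not part of this file): a provider driver with a
 * single domain typically registers it against its own DT node ("my_pd"
 * and "pdev" are hypothetical):
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 *	if (!ret)
 *		ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *
 * The matching DT node would then carry "#power-domain-cells = <0>;".
 */
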
/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		if (!data->domains[i])
			continue;
		if (!pm_genpd_present(data->domains[i]))
			goto error;

		data->domains[i]->provider = &np->fwnode;
		data->domains[i]->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		if (!data->domains[i])
			continue;
		data->domains[i]->provider = NULL;
		data->domains[i]->has_provider = false;
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);

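/*
 * Illustrative sketch (not part of this file): a controller exposing several
 * domains fills a genpd_onecell_data array indexed by the single specifier
 * cell; all names are hypothetical:
 *
 *	static struct generic_pm_domain *my_domains[2] = { &pd_a, &pd_b };
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &my_onecell_data);
 *
 * A consumer would then reference e.g. "power-domains = <&provider 1>;" to
 * reach pd_b through genpd_xlate_onecell().
 */
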
/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node)
				if (gpd->provider == &np->fwnode)
					gpd->has_provider = false;

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, NULL);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Device node pointer associated with the PM domain provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node that is passed. The PM
 * domain will only be removed, if the provider associated with domain
 * has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 on successfully attached PM domain or negative error code. Note
 * that if a power-domain exists for the device, but it cannot be found or
 * turned on, then return -EPROBE_DEFER to ensure that the device is not
 * probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_add_device(pd, dev, NULL);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d",
				pd->name, ret);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	genpd_lock(pd);
	ret = genpd_power_on(pd, 0);
	genpd_unlock(pd);

	if (ret)
		genpd_remove_device(pd, dev);
out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);

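/*
 * Illustrative DT fragment (not from this file) showing the generic binding
 * that genpd_dev_pm_attach() parses; node names and addresses are
 * hypothetical:
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <0>;
 *	};
 *
 *	consumer@12350000 {
 *		compatible = "foo,consumer";
 *		reg = <0x12350000 0x1000>;
 *		power-domains = <&power>;
 *	};
 */
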
static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
				    struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
						&entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
						state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
						&exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
						state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000 * residency;

	genpd_state->power_on_latency_ns = 1000 * exit_latency;
	genpd_state->power_off_latency_ns = 1000 * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;
		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and is the responsibility of the caller to
 * free the memory after use. If no domain idle states are found it returns
 * -EINVAL and in case of errors, a negative error code.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret <= 0)
		return ret < 0 ? ret : -EINVAL;

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);

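/*
 * Illustrative DT fragment (not from this file) of the domain-idle-state
 * binding consumed by of_genpd_parse_idle_states(); latency values are
 * made up:
 *
 *	PD_STATE: power-domain-state {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <1000>;
 *		exit-latency-us = <2000>;
 *		min-residency-us = <10000>;
 *	};
 *
 * A provider would parse it roughly like this before pm_genpd_init()
 * ("my_pd" is hypothetical):
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (!ret) {
 *		my_pd.states = states;
 *		my_pd.state_count = nr_states;
 *	}
 */
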
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */

/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int genpd_status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GPD_STATE_POWER_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int genpd_sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->master_links, master_node)
		seq_printf(s, "%s\n", link->slave->name);

	genpd_unlock(genpd);
	return ret;
}

static int genpd_idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms)\n");

	for (i = 0; i < genpd->state_count; i++) {
		ktime_t delta = 0;
		s64 msecs;

		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		msecs = ktime_to_ms(
			ktime_add(genpd->states[i].idle_time, delta));
		seq_printf(s, "S%-13i %lld\n", i, msecs);
	}

	genpd_unlock(genpd);
	return ret;
}

static int genpd_active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GPD_STATE_ACTIVE)
		delta = ktime_sub(ktime_get(), genpd->accounting_time);

	seq_printf(s, "%lld ms\n", ktime_to_ms(
				ktime_add(genpd->on_time, delta)));

	genpd_unlock(genpd);
	return ret;
}

static int genpd_total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {

		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		total = ktime_add(total, genpd->states[i].idle_time);
	}
	total = ktime_add(total, delta);

	seq_printf(s, "%lld ms\n", ktime_to_ms(total));

	genpd_unlock(genpd);
	return ret;
}

static int genpd_devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

#define define_genpd_open_function(name) \
static int genpd_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, genpd_##name##_show, inode->i_private); \
}

define_genpd_open_function(summary);
define_genpd_open_function(status);
define_genpd_open_function(sub_domains);
define_genpd_open_function(idle_states);
define_genpd_open_function(active_time);
define_genpd_open_function(total_idle_time);
define_genpd_open_function(devices);

#define define_genpd_debugfs_fops(name) \
static const struct file_operations genpd_##name##_fops = { \
	.open = genpd_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

define_genpd_debugfs_fops(summary);
define_genpd_debugfs_fops(status);
define_genpd_debugfs_fops(sub_domains);
define_genpd_debugfs_fops(idle_states);
define_genpd_debugfs_fops(active_time);
define_genpd_debugfs_fops(total_idle_time);
define_genpd_debugfs_fops(devices);

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;
	struct generic_pm_domain *genpd;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
		if (!d)
			return -ENOMEM;

		debugfs_create_file("current_state", 0444,
				d, genpd, &genpd_status_fops);
		debugfs_create_file("sub_domains", 0444,
				d, genpd, &genpd_sub_domains_fops);
		debugfs_create_file("idle_states", 0444,
				d, genpd, &genpd_idle_states_fops);
		debugfs_create_file("active_time", 0444,
				d, genpd, &genpd_active_time_fops);
		debugfs_create_file("total_idle_time", 0444,
				d, genpd, &genpd_total_idle_time_fops);
		debugfs_create_file("devices", 0444,
				d, genpd, &genpd_devices_fops);
	}

	return 0;
}
late_initcall(pm_genpd_debug_init);

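/*
 * Illustrative usage (not from this file): with CONFIG_DEBUG_FS enabled and
 * debugfs mounted at the usual location, the files created above can be
 * inspected from a shell, e.g.:
 *
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	# cat /sys/kernel/debug/pm_genpd/<domain-name>/current_state
 */
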
static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */