// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

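/*
 * A minimal sketch of what GENPD_DEV_CALLBACK() does: it dispatches to the
 * named per-device callback from genpd->dev_ops if one is set, and otherwise
 * evaluates to the zero value of the given type. For instance,
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) behaves like:
 *
 *	int (*cb)(struct device *) = genpd->dev_ops.stop;
 *
 *	return cb ? cb(dev) : 0;
 */
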
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)

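/*
 * Illustrative sketch (hypothetical provider, not part of this file): the
 * flags above are set by the provider before pm_genpd_init(), e.g. to get
 * the spinlock-based lock_ops so the domain can be powered on/off with
 * interrupts disabled:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.flags = GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_ACTIVE_WAKEUP,
 *	};
 *
 * pm_genpd_init(&foo_pd, NULL, true) then selects genpd_spin_ops in
 * genpd_lock_init() based on GENPD_FLAG_IRQ_SAFE.
 */
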
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always-on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, we have just come out of the off
	 * state, so update the idle time; otherwise update the on time.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif /* CONFIG_DEBUG_FS */

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take the vote from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back. */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. Pass 0 when the device no
 *	   longer has any performance-state constraints, so that it stops
 *	   contributing to the aggregated target state of the genpd.
 *
 * It is assumed that the caller guarantees that the genpd wouldn't be detached
 * while this routine is called.
 *
 * Returns 0 on success and a negative error value on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);

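/*
 * Illustrative consumer-side sketch (hypothetical driver, not part of this
 * file): a device attached to a genpd votes for a performance state while
 * active and drops its vote when idle. The state values are made up; real
 * drivers typically derive them from an OPP table.
 *
 *	static int foo_enter_turbo(struct device *dev)
 *	{
 *		// Vote for performance state 3; the genpd aggregates the
 *		// votes of all devices and subdomains and applies the max.
 *		return dev_pm_genpd_set_performance_state(dev, 3);
 *	}
 *
 *	static void foo_enter_idle(struct device *dev)
 *	{
 *		// A state of 0 removes this device's constraint entirely.
 *		dev_pm_genpd_set_performance_state(dev, 0);
 *	}
 */
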
/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform the PM framework of the next wakeup event. It is
 * assumed that the callers guarantee that the genpd wouldn't be detached while
 * this routine is called. Additionally, it is also assumed that @dev isn't
 * runtime suspended (RPM_SUSPENDED).
 *
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so a stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);

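/*
 * Illustrative sketch (hypothetical consumer, not part of this file): a
 * periodic device can hint its next expected wakeup so that governors can
 * pick a domain idle state whose latency fits in the remaining time.
 *
 *	static void foo_arm_timer(struct device *dev, ktime_t period)
 *	{
 *		// Tell the governor when we expect to be needed again.
 *		dev_pm_genpd_set_next_wakeup(dev,
 *					     ktime_add(ktime_get(), period));
 *	}
 */
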
/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device at the point when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);

/**
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power off
 * should be synchronous.
 *
 * It is assumed that the callers guarantee that the genpd wouldn't be detached
 * while this routine is called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);

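/*
 * Illustrative sketch (hypothetical consumer, not part of this file):
 * before triggering an operation that requires the upcoming power off to
 * complete synchronously, a consumer can set the flag like this:
 *
 *	dev_pm_genpd_synced_poweroff(dev);
 *	// ...then allow/trigger runtime suspend; the provider's
 *	// ->power_off() may check genpd->synced_poweroff and act on it.
 */
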
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;

		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow the PM domain to be powered off when an
		 * IRQ-safe device is part of a non-IRQ-safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->child_links,
					child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since we never power off a non-IRQ-safe domain that holds an
	 * IRQ-safe device, there is no need to restore power to it here.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

 err_stop:
	genpd_stop_dev(genpd, dev);
 err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall_sync(genpd_power_off_unused);

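/*
 * Usage note: passing "pd_ignore_unused" on the kernel command line keeps
 * otherwise-unused PM domains powered, which can help when debugging a
 * suspected genpd-related hang, e.g.:
 *
 *	... console=ttyS0 pd_ignore_unused
 */
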
#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;

		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @suspend_noirq: Generic suspend_noirq callback.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev,
				int (*suspend_noirq)(struct device *dev),
				int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_suspend_noirq,
				    pm_generic_resume_noirq);
}

/**
 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_finish_resume(struct device *dev,
			       int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return resume_noirq(dev);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_resume_noirq);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_freeze_noirq,
				    pm_generic_thaw_noirq);
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_poweroff_noirq,
				    pm_generic_restore_noirq);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, which can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);

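/*
 * Illustrative sketch (hypothetical syscore user, not part of this file):
 * a driver whose device must stay functional until the very last stage of
 * system suspend can use syscore ops together with these helpers. foo_dev
 * is an assumed global pointer to the attached device.
 *
 *	static int foo_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(foo_dev);
 *		return 0;
 *	}
 *
 *	static void foo_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(foo_dev);
 *	}
 */
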
#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

 err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	if (!genpd || !dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);

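/*
 * Illustrative sketch (hypothetical platform code, not part of this file):
 * statically attaching a device to a domain, e.g. from board or SoC init
 * code when no firmware description is available.
 *
 *	ret = pm_genpd_add_device(&foo_pd, &foo_platform_device->dev);
 *	if (ret)
 *		pr_err("failed to add device to %s: %d\n", foo_pd.name, ret);
 */
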
static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is called.
 *
 * Returns 0 on success and a negative error value on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);

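/*
 * Illustrative sketch (hypothetical consumer, not part of this file): a
 * driver that must save context right before its domain loses power can
 * register a power notifier and react to the GENPD_NOTIFY_* events.
 * foo_save_context() and foo_restore_context() are assumed helpers.
 *
 *	static int foo_pd_notify(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			foo_save_context();
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			foo_restore_context();
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_pd_nb = {
 *		.notifier_call = foo_pd_notify,
 *	};
 *
 *	// After the device is attached to its genpd:
 *	ret = dev_pm_genpd_add_notifier(dev, &foo_pd_nb);
 */
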
/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is called.
 *
 * Returns 0 on success and a negative error value on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

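/*
 * Illustrative sketch (hypothetical topology, not part of this file):
 * building a two-level hierarchy where "foo_core_pd" can only be powered
 * off once its child "foo_gpu_pd" is off.
 *
 *	pm_genpd_init(&foo_core_pd, NULL, false);
 *	pm_genpd_init(&foo_gpu_pd, NULL, false);
 *	ret = pm_genpd_add_subdomain(&foo_core_pd, &foo_gpu_pd);
 */
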
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Leader PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = NULL;
	int ret;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	if (genpd->gov) {
		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
		if (!gd) {
			ret = -ENOMEM;
			goto free;
		}

		gd->max_off_time_ns = -1;
		gd->max_off_time_changed = true;
		gd->next_wakeup = KTIME_MAX;
		gd->next_hrtimer = KTIME_MAX;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			goto free;
	}

	genpd->gd = gd;
	return 0;

free:
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	kfree(gd);
	return ret;
}

static void genpd_free_data(struct generic_pm_domain *genpd)
{
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);
	kfree(genpd->gd);
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get_mono_fast_ns();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;
	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* The always-on governor works better with the corresponding flag. */
	if (gov == &pm_domain_always_on_gov)
		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
			!genpd_status_on(genpd)) {
		pr_err("always-on PM domain %s is not on\n", genpd->name);
		return -EINVAL;
	}

	/* Multiple states but no governor doesn't make sense. */
	if (!gov && genpd->state_count > 1)
		pr_warn("%s: no governor for states\n", genpd->name);

	ret = genpd_alloc_data(genpd);
	if (ret)
		return ret;

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
	genpd_debug_add(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);

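/*
 * Illustrative provider-side sketch (hypothetical SoC driver, not part of
 * this file): the provider fills in the callbacks and flags, then hands
 * the domain to the framework. foo_regmap_enable()/foo_regmap_disable()
 * are assumed helpers.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		return foo_regmap_enable(pd);
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		return foo_regmap_disable(pd);
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);	// start powered off
 */
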
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	genpd_debug_remove(genpd);
	cancel_work_sync(&genpd->power_off_work);
	genpd_free_data(genpd);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);

/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;
	fwnode_dev_initialized(&np->fwnode, true);

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	bool ret = false;
	const struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd == genpd) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}


/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret;

	if (!np || !genpd)
		return -EINVAL;

	if (!genpd_present(genpd))
		return -EINVAL;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret)
			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(IS_ERR(genpd->opp_table));
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		return ret;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
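
/*
 * Example (an illustrative sketch only, not part of this file; the "foo"
 * names and callbacks are hypothetical): a platform driver for a single
 * power island could register a simple provider like this:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	static int foo_pd_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = pm_genpd_init(&foo_pd, NULL, true);
 *		if (ret)
 *			return ret;
 *
 *		return of_genpd_add_provider_simple(pdev->dev.of_node,
 *						    &foo_pd);
 *	}
 */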

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err_probe(&genpd->dev, ret,
					      "Failed to add OPP table for index %d\n", i);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
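
/*
 * Example (an illustrative sketch; the "foo" identifiers are hypothetical):
 * a controller that exposes several domains behind one DT node fills in a
 * genpd_onecell_data and registers it, after initializing each domain with
 * pm_genpd_init():
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_pd_data);
 *
 * Unset entries in 'domains' are allowed; genpd_xlate_onecell() returns
 * -ENOENT for them.
 */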

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if one is found, uses the provider's xlate function to map the phandle args
 * to a PM domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for looking up the PM domain
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	if (!dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);
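
/*
 * Illustrative caller sketch (hypothetical; most consumers rely on the
 * automatic attach performed by the driver core instead): resolve the
 * specifier from a consumer node and add the device by hand:
 *
 *	struct of_phandle_args pd_args;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (ret)
 *		return ret;
 *
 *	ret = of_genpd_add_device(&pd_args, dev);
 *	of_node_put(pd_args.np);
 */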

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
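
/*
 * Illustrative sketch (hypothetical names, including FOO_PD_GPU): a provider
 * that parents one of its own domains under another firmware-described
 * domain can build the two specifiers and link them:
 *
 *	struct of_phandle_args parent, child;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(np, "power-domains",
 *					 "#power-domain-cells", 0, &parent);
 *	if (ret)
 *		return ret;
 *
 *	child.np = np;
 *	child.args_count = 1;
 *	child.args[0] = FOO_PD_GPU;
 *
 *	ret = of_genpd_add_subdomain(&parent, &child);
 *	of_node_put(parent.np);
 *
 * The -ENOENT to -EPROBE_DEFER translation above lets callers retry until
 * both providers have registered.
 */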

/**
 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
			      struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = pm_genpd_remove_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the 'provider' device structure that is passed. The PM
 * domain will only be removed if the provider associated with the domain
 * has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);
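
/*
 * Illustrative teardown sketch: a provider's remove path drops the provider
 * first and then unwinds its domains one by one; genpd_remove() refuses to
 * remove a domain while 'has_provider' is still set, hence the ordering:
 *
 *	struct generic_pm_domain *gpd;
 *
 *	of_genpd_del_provider(np);
 *
 *	do {
 *		gpd = of_genpd_remove_last(np);
 *	} while (!IS_ERR(gpd));
 */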

static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static const struct bus_type genpd_bus_type = {
	.name		= "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If one is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	/* Drop the default performance state */
	if (dev_gpd_data(dev)->default_pstate) {
		dev_pm_genpd_set_performance_state(dev, 0);
		dev_gpd_data(dev)->default_pstate = 0;
	}

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int pstate;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	/* Set the default performance state */
	pstate = of_get_required_opp_performance_state(dev->of_node, index);
	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
		ret = pstate;
		goto err;
	} else if (pstate > 0) {
		ret = dev_pm_genpd_set_performance_state(dev, pstate);
		if (ret)
			goto err;
		dev_gpd_data(dev)->default_pstate = pstate;
	}

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret) {
		/* Drop the default performance state */
		if (dev_gpd_data(dev)->default_pstate) {
			dev_pm_genpd_set_performance_state(dev, 0);
			dev_gpd_data(dev)->default_pstate = 0;
		}

		genpd_remove_device(pd, dev);
		return -EPROBE_DEFER;
	}

	return 1;

err:
	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
		pd->name, ret);
	genpd_remove_device(pd, dev);
	return ret;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse the device's OF node to find a PM domain specifier. If one is found,
 * attach the device to the retrieved pm_domain ops.
 *
 * Returns 1 when a PM domain has successfully been attached, 0 when the device
 * doesn't need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device, but
 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
 * that the device is not probed and to retry later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
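
/*
 * Illustrative DT snippet (hypothetical nodes): this function only attaches
 * consumers with exactly one power-domain specifier, e.g.:
 *
 *	usb@12350000 {
 *		compatible = "foo,usb";
 *		reg = <0x12350000 0x1000>;
 *		power-domains = <&power 0>;
 *	};
 *
 * Buses normally reach it through dev_pm_domain_attach() at probe time
 * rather than calling it directly.
 */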

/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier at the provided
 * @index. If one is found, create a virtual device and attach it to the
 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
 * ->detach() callback in the struct dev_pm_domain is assigned to
 * genpd_dev_pm_detach().
 *
 * Returns the created virtual device when a PM domain has successfully been
 * attached, NULL when the device doesn't need a PM domain, else an ERR_PTR()
 * in case of failures. If a power-domain exists for the device, but it cannot
 * be found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure
 * that the device is not probed and to retry later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Verify that the index is within a valid range. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	virt_dev->release = genpd_release_dev;
	virt_dev->of_node = of_node_get(dev->of_node);

	ret = device_register(virt_dev);
	if (ret) {
		put_device(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
	if (ret < 1) {
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
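
/*
 * Illustrative consumer sketch (the "perf" domain name is hypothetical):
 * drivers usually use the dev_pm_domain_attach_by_name()/_by_id() wrappers
 * and tie the returned virtual device to themselves with a device link:
 *
 *	struct device *pd_dev;
 *	struct device_link *link;
 *
 *	pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return pd_dev ? PTR_ERR(pd_dev) : -ENODATA;
 *
 *	link = device_link_add(dev, pd_dev, DL_FLAG_STATELESS |
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *	if (!link)
 *		return -ENODEV;
 *
 * Runtime PM on 'pd_dev' then controls the domain at the looked-up index on
 * behalf of 'dev'.
 */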

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}
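
/*
 * Illustrative DT snippet for the name-based lookup above (hypothetical):
 *
 *	codec@12360000 {
 *		compatible = "foo,codec";
 *		power-domains = <&power 0>, <&power 1>;
 *		power-domain-names = "mx", "px";
 *	};
 *
 * genpd_dev_pm_attach_by_name(dev, "px") resolves to index 1 and then
 * behaves exactly like genpd_dev_pm_attach_by_id(dev, 1).
 */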

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000LL * residency;

	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}
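
/*
 * Illustrative DT snippet of a node this parser accepts (values made up),
 * referenced from a provider through its "domain-idle-states" property:
 *
 *	power: power-controller@12340000 {
 *		...
 *		domain-idle-states = <&pd_ret>;
 *	};
 *
 *	pd_ret: domain-retention {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <1000>;
 *		exit-latency-us = <2000>;
 *		min-residency-us = <10000>;
 *	};
 *
 * entry-latency-us and exit-latency-us are mandatory and are converted to
 * nanoseconds above; min-residency-us is optional.
 */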

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;

		if (!of_device_is_available(np))
			continue;

		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the domain idle states parsed from the OF node via @states. The
 * memory for the states is allocated by this function and it is the caller's
 * responsibility to free it after use. If zero or more compatible domain idle
 * states are found, the function returns 0; in case of errors, a negative
 * error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
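
/*
 * Illustrative caller sketch: a provider typically feeds the parsed array
 * straight into its genpd before pm_genpd_init() (error handling trimmed,
 * 'foo_pd' hypothetical):
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	foo_pd.states = states;
 *	foo_pd.state_count = nr_states;
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, false);
 *
 * As documented above, the caller owns 'states' and must free it once the
 * domain is gone.
 */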

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/*** debugfs support ***/

#ifdef CONFIG_DEBUG_FS
/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_printf(s, "%-25s  ", p);
}

static void perf_status_str(struct seq_file *s, struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	seq_put_decimal_ull(s, "", gpd_data->performance_state);
}

static int genpd_summary_one(struct seq_file *s,
			struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
			seq_printf(s, "\n%48s", " ");
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		perf_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          children                           performance\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GENPD_STATE_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->parent_links, parent_node)
		seq_printf(s, "%s\n", link->child->name);

	genpd_unlock(genpd);
	return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, idle_time = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");

	for (i = 0; i < genpd->state_count; i++) {
		idle_time += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				idle_time += delta;
			}
		}

		do_div(idle_time, NSEC_PER_MSEC);
		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
			   genpd->states[i].usage, genpd->states[i].rejected);
	}

	genpd_unlock(genpd);
	return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, on_time, delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GENPD_STATE_ON) {
		now = ktime_get_mono_fast_ns();
		if (now > genpd->accounting_time)
			delta = now - genpd->accounting_time;
	}

	on_time = genpd->on_time + delta;
	do_div(on_time, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", on_time);

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
		total += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				total += delta;
			}
		}
	}

	do_div(total, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", total);

	genpd_unlock(genpd);
	return ret;
}

static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static void genpd_debug_add(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
	debugfs_create_file("sub_domains", 0444,
			    d, genpd, &sub_domains_fops);
	debugfs_create_file("idle_states", 0444,
			    d, genpd, &idle_states_fops);
	debugfs_create_file("active_time", 0444,
			    d, genpd, &active_time_fops);
	debugfs_create_file("total_idle_time", 0444,
			    d, genpd, &total_idle_time_fops);
	debugfs_create_file("devices", 0444,
			    d, genpd, &devices_fops);
	if (genpd->set_performance_state)
		debugfs_create_file("perf_state", 0444,
				    d, genpd, &perf_state_fops);
}
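
/*
 * With debugfs mounted in the usual place, the files created above appear as
 * /sys/kernel/debug/pm_genpd/<domain-name>/{current_state,sub_domains,
 * idle_states,active_time,total_idle_time,devices,perf_state}, next to the
 * global pm_genpd_summary file created in genpd_debug_init() below.
 */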

static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */