/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}

void disable_cpuidle(void)
{
        off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
                           struct cpuidle_device *dev)
{
        return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int i;

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = drv->state_count - 1; i >= 0; i--)
                if (drv->states[i].enter_dead)
                        return drv->states[i].enter_dead(dev, i);

        return -ENODEV;
}
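/*
 * Illustrative sketch (not from this file; my_enter_dead and my_cpu_poweroff
 * are hypothetical names): a driver that wants cpuidle_play_dead() to handle
 * CPU off-lining supplies ->enter_dead() in one of its states:
 *
 *      static int my_enter_dead(struct cpuidle_device *dev, int index)
 *      {
 *              my_cpu_poweroff();      // platform-specific power-down
 *              return 0;               // normally not reached
 *      }
 *
 * and sets .enter_dead = my_enter_dead in the corresponding cpuidle_state.
 */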
static int find_deepest_state(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev,
                              unsigned int max_latency,
                              unsigned int forbidden_flags,
                              bool s2idle)
{
        unsigned int latency_req = 0;
        int i, ret = 0;

        for (i = 1; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable || s->exit_latency <= latency_req
                    || s->exit_latency > max_latency
                    || (s->flags & forbidden_flags)
                    || (s2idle && !s->enter_s2idle))
                        continue;

                latency_req = s->exit_latency;
                ret = i;
        }
        return ret;
}
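/*
 * Worked example (hypothetical state table): given
 *      state 1: exit_latency =  10 us
 *      state 2: exit_latency = 100 us, disabled via sysfs (su->disable set)
 *      state 3: exit_latency = 500 us
 * and max_latency == UINT_MAX, forbidden_flags == 0, s2idle == false,
 * the loop above skips state 2, keeps raising latency_req, and returns 3,
 * the deepest state that passes every filter.
 */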
/**
 * cpuidle_use_deepest_state - Set/clear governor override flag.
 * @enable: New value of the flag.
 *
 * Set/unset the current CPU to use the deepest idle state (override governors
 * going forward if set).
 */
void cpuidle_use_deepest_state(bool enable)
{
        struct cpuidle_device *dev;

        preempt_disable();
        dev = cpuidle_get_device();
        if (dev)
                dev->use_deepest_state = enable;
        preempt_enable();
}
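/*
 * Usage sketch (hedged; this mirrors what callers such as play_idle() in
 * kernel/sched/idle.c do): force the deepest state on the current CPU for a
 * bounded period, then hand control back to the governor.
 *
 *      cpuidle_use_deepest_state(true);
 *      // ...the next idle entries on this CPU bypass the governor...
 *      cpuidle_use_deepest_state(false);
 */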
/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                               struct cpuidle_device *dev)
{
        return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

#ifdef CONFIG_SUSPEND
static void enter_s2idle_proper(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, int index)
{
        /*
         * trace_suspend_resume() called by tick_freeze() for the last CPU
         * executing it contains RCU usage regarded as invalid in the idle
         * context, so tell RCU about that.
         */
        RCU_NONIDLE(tick_freeze());
        /*
         * The state used here cannot be a "coupled" one, because the "coupled"
         * cpuidle mechanism enables interrupts and doing that with timekeeping
         * suspended is generally unsafe.
         */
        stop_critical_timings();
        drv->states[index].enter_s2idle(dev, drv, index);
        if (WARN_ON_ONCE(!irqs_disabled()))
                local_irq_disable();
        /*
         * timekeeping_resume() that will be called by tick_unfreeze() for the
         * first CPU executing it calls functions containing RCU read-side
         * critical sections, so tell RCU about that.
         */
        RCU_NONIDLE(tick_unfreeze());
        start_critical_timings();
}

/**
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with frozen tick.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        int index;

        /*
         * Find the deepest state with ->enter_s2idle present, which guarantees
         * that interrupts won't be enabled when it exits and allows the tick to
         * be frozen safely.
         */
        index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
        if (index > 0)
                enter_s2idle_proper(drv, dev, index);

        return index;
}
#endif /* CONFIG_SUSPEND */
/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        int index)
{
        int entered_state;

        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
        ktime_t time_start, time_end;
        s64 diff;

        /*
         * Tell the time framework to switch to a broadcast timer because our
         * local timer will be shut down.  If a local timer is used from another
         * CPU as a broadcast timer, this call may fail if it is not available.
         */
        if (broadcast && tick_broadcast_enter()) {
                index = find_deepest_state(drv, dev, target_state->exit_latency,
                                           CPUIDLE_FLAG_TIMER_STOP, false);
                if (index < 0) {
                        default_idle_call();
                        return -EBUSY;
                }
                target_state = &drv->states[index];
                broadcast = false;
        }

        /* Take note of the planned idle state. */
        sched_idle_set_state(target_state);

        trace_cpu_idle_rcuidle(index, dev->cpu);
        time_start = ns_to_ktime(local_clock());

        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();

        sched_clock_idle_wakeup_event();
        time_end = ns_to_ktime(local_clock());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* The cpu is no longer idle or about to enter idle. */
        sched_idle_set_state(NULL);

        if (broadcast) {
                if (WARN_ON_ONCE(!irqs_disabled()))
                        local_irq_disable();

                tick_broadcast_exit();
        }

        if (!cpuidle_state_is_coupled(drv, index))
                local_irq_enable();

        diff = ktime_us_delta(time_end, time_start);
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        if (entered_state >= 0) {
                /* Update cpuidle counters */
                /* This can be moved to within driver enter routine
                 * but that results in multiple copies of same code.
                 */
                dev->states_usage[entered_state].time += dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}
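/*
 * Bookkeeping example (illustrative numbers): if local_clock() read
 * 1,000,000 ns at time_start and 1,250,000 ns at time_end, then
 * ktime_us_delta() yields 250, last_residency becomes 250 (microseconds),
 * and states_usage[entered_state].time grows by 250.
 */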
/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.  The return value must not be negative.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index in the idle state, < 0 in case of error.
 * The error code depends on the backend driver
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                  int index)
{
        if (cpuidle_state_is_coupled(drv, index))
                return cpuidle_enter_state_coupled(dev, drv, index);
        return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor what was the state
 * we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
        if (cpuidle_curr_governor->reflect && index >= 0)
                cpuidle_curr_governor->reflect(dev, index);
}
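/*
 * Illustrative sketch (simplified paraphrase, not a copy of the real code):
 * the scheduler idle loop in kernel/sched/idle.c ties the three calls above
 * together roughly as follows.
 *
 *      next_state = cpuidle_select(drv, dev);          // governor picks a state
 *      entered_state = cpuidle_enter(drv, dev, next_state);
 *      cpuidle_reflect(dev, entered_state);            // feed the outcome back
 */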
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                wake_up_all_idle_cpus();
        }

        /*
         * Make sure external observers (such as the scheduler)
         * are done looking at pointed idle states.
         */
        synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv;

        if (!dev)
                return -EINVAL;

        if (dev->enabled)
                return 0;

        drv = cpuidle_get_cpu_driver(dev);

        if (!drv || !cpuidle_curr_governor)
                return -EIO;

        if (!dev->registered)
                return -EINVAL;

        ret = cpuidle_add_device_sysfs(dev);
        if (ret)
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;

        smp_wmb();
        dev->enabled = 1;
        enabled_devices++;

        return 0;

fail_sysfs:
        cpuidle_remove_device_sysfs(dev);
        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
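/*
 * Usage sketch for the locking rule stated above (hedged; "dev" stands for
 * some already-registered cpuidle device): external callers bracket the
 * enable/disable calls with the pause/resume helpers.
 *
 *      cpuidle_pause_and_lock();
 *      cpuidle_disable_device(dev);
 *      // ...reconfigure states, apply quirks, etc....
 *      cpuidle_enable_device(dev);
 *      cpuidle_resume_and_unlock();
 */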
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!dev || !dev->enabled)
                return;

        if (!drv || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(drv, dev);

        cpuidle_remove_device_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!dev->registered)
                return;

        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);

        dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
        memset(dev->states_usage, 0, sizeof(dev->states_usage));
        dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!try_module_get(drv->owner))
                return -EINVAL;

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);

        ret = cpuidle_coupled_register_device(dev);
        if (ret)
                __cpuidle_unregister_device(dev);
        else
                dev->registered = 1;

        return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret = -EBUSY;

        if (!dev)
                return -EINVAL;

        mutex_lock(&cpuidle_lock);

        if (dev->registered)
                goto out_unlock;

        __cpuidle_device_init(dev);

        ret = __cpuidle_register_device(dev);
        if (ret)
                goto out_unlock;

        ret = cpuidle_add_sysfs(dev);
        if (ret)
                goto out_unregister;

        ret = cpuidle_enable_device(dev);
        if (ret)
                goto out_sysfs;

        cpuidle_install_idle_handler();

out_unlock:
        mutex_unlock(&cpuidle_lock);

        return ret;

out_sysfs:
        cpuidle_remove_sysfs(dev);
out_unregister:
        __cpuidle_unregister_device(dev);
        goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        if (!dev || dev->registered == 0)
                return;

        cpuidle_pause_and_lock();
        cpuidle_disable_device(dev);
        cpuidle_remove_sysfs(dev);
        __cpuidle_unregister_device(dev);
        cpuidle_coupled_unregister_device(dev);
        cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
/**
 * cpuidle_unregister: unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
        int cpu;
        struct cpuidle_device *device;

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                cpuidle_unregister_device(device);
        }

        cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function is used for all the common
 * initialization patterns there are in the arch-specific drivers. The
 * devices are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
                     const struct cpumask *const coupled_cpus)
{
        int ret, cpu;
        struct cpuidle_device *device;

        ret = cpuidle_register_driver(drv);
        if (ret) {
                pr_err("failed to register cpuidle driver\n");
                return ret;
        }

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
                /*
                 * On multiplatform for ARM, the coupled idle states could be
                 * enabled in the kernel even if the cpuidle driver does not
                 * use it. Note, coupled_cpus is a struct copy.
                 */
                if (coupled_cpus)
                        device->coupled_cpus = *coupled_cpus;
#endif
                ret = cpuidle_register_device(device);
                if (!ret)
                        continue;

                pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

                cpuidle_unregister(drv);
                break;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
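/*
 * Illustrative sketch (hypothetical driver; my_idle_driver and my_enter are
 * made-up names): the common registration pattern in arch-specific drivers
 * that this helper exists for.
 *
 *      static int my_enter(struct cpuidle_device *dev,
 *                          struct cpuidle_driver *drv, int index)
 *      {
 *              cpu_do_idle();          // assumed platform low-power entry
 *              return index;
 *      }
 *
 *      static struct cpuidle_driver my_idle_driver = {
 *              .name  = "my_idle",
 *              .owner = THIS_MODULE,
 *              .states[0] = {
 *                      .enter            = my_enter,
 *                      .exit_latency     = 1,
 *                      .target_residency = 1,
 *                      .name             = "WFI",
 *                      .desc             = "simple wait-for-interrupt",
 *              },
 *              .state_count = 1,
 *      };
 *
 *      // in the platform init code:
 *      //      return cpuidle_register(&my_idle_driver, NULL);
 */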
#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        wake_up_all_idle_cpus();
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
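/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * notifier above fires whenever some other code changes the CPU_DMA_LATENCY
 * constraint, e.g. a driver doing:
 *
 *      static struct pm_qos_request my_qos_req;
 *
 *      pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *      // ...latency-critical work; states with exit_latency > 20 us are avoided...
 *      pm_qos_remove_request(&my_qos_req);
 */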
/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);