/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/kernel.h>
14 #include <linux/kmod.h>
15 #include <linux/sched.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/init.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/stat.h>
22 #include <linux/pm_opp.h>
23 #include <linux/devfreq.h>
24 #include <linux/workqueue.h>
25 #include <linux/platform_device.h>
26 #include <linux/list.h>
27 #include <linux/printk.h>
28 #include <linux/hrtimer.h>
/*
 * Plain min/max helpers. Every argument and the whole expansion are
 * parenthesized so expressions with lower-precedence operators (e.g.
 * MAX(a & 1, b)) expand correctly. NOTE: arguments are evaluated twice,
 * so callers must not pass expressions with side effects.
 */
#define MAX(a,b)	(((a) > (b)) ? (a) : (b))
#define MIN(a,b)	(((a) < (b)) ? (a) : (b))
static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
51 * find_device_devfreq() - find devfreq struct using device pointer
52 * @dev: device pointer used to lookup device devfreq.
54 * Search the list of device devfreqs and return the matched device's
55 * devfreq info. devfreq_list_lock should be held by the caller.
57 static struct devfreq *find_device_devfreq(struct device *dev)
59 struct devfreq *tmp_devfreq;
61 if (IS_ERR_OR_NULL(dev)) {
62 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
63 return ERR_PTR(-EINVAL);
65 WARN(!mutex_is_locked(&devfreq_list_lock),
66 "devfreq_list_lock must be locked.");
68 list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
69 if (tmp_devfreq->dev.parent == dev)
73 return ERR_PTR(-ENODEV);
76 static unsigned long find_available_min_freq(struct devfreq *devfreq)
78 struct dev_pm_opp *opp;
79 unsigned long min_freq = 0;
81 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
90 static unsigned long find_available_max_freq(struct devfreq *devfreq)
92 struct dev_pm_opp *opp;
93 unsigned long max_freq = ULONG_MAX;
95 opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
105 * devfreq_get_freq_level() - Lookup freq_table for the frequency
106 * @devfreq: the devfreq instance
107 * @freq: the target frequency
109 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
113 for (lev = 0; lev < devfreq->profile->max_state; lev++)
114 if (freq == devfreq->profile->freq_table[lev])
120 static int set_freq_table(struct devfreq *devfreq)
122 struct devfreq_dev_profile *profile = devfreq->profile;
123 struct dev_pm_opp *opp;
127 /* Initialize the freq_table from OPP table */
128 count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
132 profile->max_state = count;
133 profile->freq_table = devm_kcalloc(devfreq->dev.parent,
135 sizeof(*profile->freq_table),
137 if (!profile->freq_table) {
138 profile->max_state = 0;
142 for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
143 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
145 devm_kfree(devfreq->dev.parent, profile->freq_table);
146 profile->max_state = 0;
150 profile->freq_table[i] = freq;
157 * devfreq_update_status() - Update statistics of devfreq behavior
158 * @devfreq: the devfreq instance
159 * @freq: the update target frequency
161 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
163 int lev, prev_lev, ret = 0;
164 unsigned long cur_time;
166 lockdep_assert_held(&devfreq->lock);
169 /* Immediately exit if previous_freq is not initialized yet. */
170 if (!devfreq->previous_freq)
173 prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
179 devfreq->time_in_state[prev_lev] +=
180 cur_time - devfreq->last_stat_updated;
182 lev = devfreq_get_freq_level(devfreq, freq);
188 if (lev != prev_lev) {
189 devfreq->trans_table[(prev_lev *
190 devfreq->profile->max_state) + lev]++;
191 devfreq->total_trans++;
195 devfreq->last_stat_updated = cur_time;
198 EXPORT_SYMBOL(devfreq_update_status);
201 * find_devfreq_governor() - find devfreq governor from name
202 * @name: name of the governor
204 * Search the list of devfreq governors and return the matched
205 * governor's pointer. devfreq_list_lock should be held by the caller.
207 static struct devfreq_governor *find_devfreq_governor(const char *name)
209 struct devfreq_governor *tmp_governor;
211 if (IS_ERR_OR_NULL(name)) {
212 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
213 return ERR_PTR(-EINVAL);
215 WARN(!mutex_is_locked(&devfreq_list_lock),
216 "devfreq_list_lock must be locked.");
218 list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
219 if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
223 return ERR_PTR(-ENODEV);
227 * try_then_request_governor() - Try to find the governor and request the
228 * module if is not found.
229 * @name: name of the governor
231 * Search the list of devfreq governors and request the module and try again
232 * if is not found. This can happen when both drivers (the governor driver
233 * and the driver that call devfreq_add_device) are built as modules.
234 * devfreq_list_lock should be held by the caller. Returns the matched
235 * governor's pointer or an error pointer.
237 static struct devfreq_governor *try_then_request_governor(const char *name)
239 struct devfreq_governor *governor;
242 if (IS_ERR_OR_NULL(name)) {
243 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
244 return ERR_PTR(-EINVAL);
246 WARN(!mutex_is_locked(&devfreq_list_lock),
247 "devfreq_list_lock must be locked.");
249 governor = find_devfreq_governor(name);
250 if (IS_ERR(governor)) {
251 mutex_unlock(&devfreq_list_lock);
253 if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
255 err = request_module("governor_%s", "simpleondemand");
257 err = request_module("governor_%s", name);
258 /* Restore previous state before return */
259 mutex_lock(&devfreq_list_lock);
261 return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
263 governor = find_devfreq_governor(name);
269 static int devfreq_notify_transition(struct devfreq *devfreq,
270 struct devfreq_freqs *freqs, unsigned int state)
276 case DEVFREQ_PRECHANGE:
277 srcu_notifier_call_chain(&devfreq->transition_notifier_list,
278 DEVFREQ_PRECHANGE, freqs);
281 case DEVFREQ_POSTCHANGE:
282 srcu_notifier_call_chain(&devfreq->transition_notifier_list,
283 DEVFREQ_POSTCHANGE, freqs);
292 /* Load monitoring helper functions for governors use */
295 * update_devfreq() - Reevaluate the device and configure frequency.
296 * @devfreq: the devfreq instance.
298 * Note: Lock devfreq->lock before calling update_devfreq
299 * This function is exported for governors.
301 int update_devfreq(struct devfreq *devfreq)
303 struct devfreq_freqs freqs;
304 unsigned long freq, cur_freq, min_freq, max_freq;
308 if (!mutex_is_locked(&devfreq->lock)) {
309 WARN(true, "devfreq->lock must be locked by the caller.\n");
313 if (!devfreq->governor)
316 /* Reevaluate the proper frequency */
317 err = devfreq->governor->get_target_freq(devfreq, &freq);
322 * Adjust the frequency with user freq, QoS and available freq.
324 * List from the highest priority
328 max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
329 min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);
331 if (freq < min_freq) {
333 flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
335 if (freq > max_freq) {
337 flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
340 if (devfreq->profile->get_cur_freq)
341 devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
343 cur_freq = devfreq->previous_freq;
345 freqs.old = cur_freq;
347 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
349 err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
351 freqs.new = cur_freq;
352 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
357 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
359 if (devfreq_update_status(devfreq, freq))
360 dev_err(&devfreq->dev,
361 "Couldn't update frequency transition information.\n");
363 devfreq->previous_freq = freq;
366 EXPORT_SYMBOL(update_devfreq);
369 * devfreq_monitor() - Periodically poll devfreq objects.
370 * @work: the work struct used to run devfreq_monitor periodically.
373 static void devfreq_monitor(struct work_struct *work)
376 struct devfreq *devfreq = container_of(work,
377 struct devfreq, work.work);
379 mutex_lock(&devfreq->lock);
380 err = update_devfreq(devfreq);
382 dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
384 queue_delayed_work(devfreq_wq, &devfreq->work,
385 msecs_to_jiffies(devfreq->profile->polling_ms));
386 mutex_unlock(&devfreq->lock);
390 * devfreq_monitor_start() - Start load monitoring of devfreq instance
391 * @devfreq: the devfreq instance.
393 * Helper function for starting devfreq device load monitoing. By
394 * default delayed work based monitoring is supported. Function
395 * to be called from governor in response to DEVFREQ_GOV_START
396 * event when device is added to devfreq framework.
398 void devfreq_monitor_start(struct devfreq *devfreq)
400 INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
401 if (devfreq->profile->polling_ms)
402 queue_delayed_work(devfreq_wq, &devfreq->work,
403 msecs_to_jiffies(devfreq->profile->polling_ms));
405 EXPORT_SYMBOL(devfreq_monitor_start);
408 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
409 * @devfreq: the devfreq instance.
411 * Helper function to stop devfreq device load monitoing. Function
412 * to be called from governor in response to DEVFREQ_GOV_STOP
413 * event when device is removed from devfreq framework.
415 void devfreq_monitor_stop(struct devfreq *devfreq)
417 cancel_delayed_work_sync(&devfreq->work);
419 EXPORT_SYMBOL(devfreq_monitor_stop);
422 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
423 * @devfreq: the devfreq instance.
425 * Helper function to suspend devfreq device load monitoing. Function
426 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
427 * event or when polling interval is set to zero.
429 * Note: Though this function is same as devfreq_monitor_stop(),
430 * intentionally kept separate to provide hooks for collecting
431 * transition statistics.
433 void devfreq_monitor_suspend(struct devfreq *devfreq)
435 mutex_lock(&devfreq->lock);
436 if (devfreq->stop_polling) {
437 mutex_unlock(&devfreq->lock);
441 devfreq_update_status(devfreq, devfreq->previous_freq);
442 devfreq->stop_polling = true;
443 mutex_unlock(&devfreq->lock);
444 cancel_delayed_work_sync(&devfreq->work);
446 EXPORT_SYMBOL(devfreq_monitor_suspend);
449 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
450 * @devfreq: the devfreq instance.
452 * Helper function to resume devfreq device load monitoing. Function
453 * to be called from governor in response to DEVFREQ_GOV_RESUME
454 * event or when polling interval is set to non-zero.
456 void devfreq_monitor_resume(struct devfreq *devfreq)
460 mutex_lock(&devfreq->lock);
461 if (!devfreq->stop_polling)
464 if (!delayed_work_pending(&devfreq->work) &&
465 devfreq->profile->polling_ms)
466 queue_delayed_work(devfreq_wq, &devfreq->work,
467 msecs_to_jiffies(devfreq->profile->polling_ms));
469 devfreq->last_stat_updated = jiffies;
470 devfreq->stop_polling = false;
472 if (devfreq->profile->get_cur_freq &&
473 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
474 devfreq->previous_freq = freq;
477 mutex_unlock(&devfreq->lock);
479 EXPORT_SYMBOL(devfreq_monitor_resume);
482 * devfreq_interval_update() - Update device devfreq monitoring interval
483 * @devfreq: the devfreq instance.
484 * @delay: new polling interval to be set.
486 * Helper function to set new load monitoring polling interval. Function
487 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
489 void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
491 unsigned int cur_delay = devfreq->profile->polling_ms;
492 unsigned int new_delay = *delay;
494 mutex_lock(&devfreq->lock);
495 devfreq->profile->polling_ms = new_delay;
497 if (devfreq->stop_polling)
500 /* if new delay is zero, stop polling */
502 mutex_unlock(&devfreq->lock);
503 cancel_delayed_work_sync(&devfreq->work);
507 /* if current delay is zero, start polling with new delay */
509 queue_delayed_work(devfreq_wq, &devfreq->work,
510 msecs_to_jiffies(devfreq->profile->polling_ms));
514 /* if current delay is greater than new delay, restart polling */
515 if (cur_delay > new_delay) {
516 mutex_unlock(&devfreq->lock);
517 cancel_delayed_work_sync(&devfreq->work);
518 mutex_lock(&devfreq->lock);
519 if (!devfreq->stop_polling)
520 queue_delayed_work(devfreq_wq, &devfreq->work,
521 msecs_to_jiffies(devfreq->profile->polling_ms));
524 mutex_unlock(&devfreq->lock);
526 EXPORT_SYMBOL(devfreq_interval_update);
529 * devfreq_notifier_call() - Notify that the device frequency requirements
530 * has been changed out of devfreq framework.
531 * @nb: the notifier_block (supposed to be devfreq->nb)
535 * Called by a notifier that uses devfreq->nb.
537 static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
540 struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
543 mutex_lock(&devfreq->lock);
545 devfreq->scaling_min_freq = find_available_min_freq(devfreq);
546 if (!devfreq->scaling_min_freq)
549 devfreq->scaling_max_freq = find_available_max_freq(devfreq);
550 if (!devfreq->scaling_max_freq) {
551 devfreq->scaling_max_freq = ULONG_MAX;
555 err = update_devfreq(devfreq);
558 mutex_unlock(&devfreq->lock);
560 dev_err(devfreq->dev.parent,
561 "failed to update frequency from OPP notifier (%d)\n",
568 * devfreq_dev_release() - Callback for struct device to release the device.
569 * @dev: the devfreq device
571 * Remove devfreq from the list and release its resources.
573 static void devfreq_dev_release(struct device *dev)
575 struct devfreq *devfreq = to_devfreq(dev);
577 mutex_lock(&devfreq_list_lock);
578 list_del(&devfreq->node);
579 mutex_unlock(&devfreq_list_lock);
581 if (devfreq->profile->exit)
582 devfreq->profile->exit(devfreq->dev.parent);
584 mutex_destroy(&devfreq->lock);
585 srcu_cleanup_notifier_head(&devfreq->transition_notifier_list);
590 * devfreq_add_device() - Add devfreq feature to the device
591 * @dev: the device to add devfreq feature.
592 * @profile: device-specific profile to run devfreq.
593 * @governor_name: name of the policy to choose frequency.
594 * @data: private data for the governor. The devfreq framework does not
597 struct devfreq *devfreq_add_device(struct device *dev,
598 struct devfreq_dev_profile *profile,
599 const char *governor_name,
602 struct devfreq *devfreq;
603 struct devfreq_governor *governor;
606 if (!dev || !profile || !governor_name) {
607 dev_err(dev, "%s: Invalid parameters.\n", __func__);
608 return ERR_PTR(-EINVAL);
611 mutex_lock(&devfreq_list_lock);
612 devfreq = find_device_devfreq(dev);
613 mutex_unlock(&devfreq_list_lock);
614 if (!IS_ERR(devfreq)) {
615 dev_err(dev, "%s: Unable to create devfreq for the device.\n",
621 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
627 mutex_init(&devfreq->lock);
628 mutex_lock(&devfreq->lock);
629 devfreq->dev.parent = dev;
630 devfreq->dev.class = devfreq_class;
631 devfreq->dev.release = devfreq_dev_release;
632 INIT_LIST_HEAD(&devfreq->node);
633 devfreq->profile = profile;
634 strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
635 devfreq->previous_freq = profile->initial_freq;
636 devfreq->last_status.current_frequency = profile->initial_freq;
637 devfreq->data = data;
638 devfreq->nb.notifier_call = devfreq_notifier_call;
640 if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
641 mutex_unlock(&devfreq->lock);
642 err = set_freq_table(devfreq);
645 mutex_lock(&devfreq->lock);
648 devfreq->scaling_min_freq = find_available_min_freq(devfreq);
649 if (!devfreq->scaling_min_freq) {
650 mutex_unlock(&devfreq->lock);
654 devfreq->min_freq = devfreq->scaling_min_freq;
656 devfreq->scaling_max_freq = find_available_max_freq(devfreq);
657 if (!devfreq->scaling_max_freq) {
658 mutex_unlock(&devfreq->lock);
662 devfreq->max_freq = devfreq->scaling_max_freq;
664 dev_set_name(&devfreq->dev, "%s", dev_name(dev));
665 err = device_register(&devfreq->dev);
667 mutex_unlock(&devfreq->lock);
668 put_device(&devfreq->dev);
672 devfreq->trans_table =
673 devm_kzalloc(&devfreq->dev,
674 array3_size(sizeof(unsigned int),
675 devfreq->profile->max_state,
676 devfreq->profile->max_state),
678 devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
679 devfreq->profile->max_state,
680 sizeof(unsigned long),
682 devfreq->last_stat_updated = jiffies;
684 srcu_init_notifier_head(&devfreq->transition_notifier_list);
686 mutex_unlock(&devfreq->lock);
688 mutex_lock(&devfreq_list_lock);
690 governor = try_then_request_governor(devfreq->governor_name);
691 if (IS_ERR(governor)) {
692 dev_err(dev, "%s: Unable to find governor for the device\n",
694 err = PTR_ERR(governor);
698 devfreq->governor = governor;
699 err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
702 dev_err(dev, "%s: Unable to start governor for the device\n",
707 list_add(&devfreq->node, &devfreq_list);
709 mutex_unlock(&devfreq_list_lock);
714 mutex_unlock(&devfreq_list_lock);
716 devfreq_remove_device(devfreq);
724 EXPORT_SYMBOL(devfreq_add_device);
727 * devfreq_remove_device() - Remove devfreq feature from a device.
728 * @devfreq: the devfreq instance to be removed
730 * The opposite of devfreq_add_device().
732 int devfreq_remove_device(struct devfreq *devfreq)
737 if (devfreq->governor)
738 devfreq->governor->event_handler(devfreq,
739 DEVFREQ_GOV_STOP, NULL);
740 device_unregister(&devfreq->dev);
744 EXPORT_SYMBOL(devfreq_remove_device);
/* devres match callback: true when the managed pointer equals @data. */
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
/* devres release callback: tear down the managed devfreq instance. */
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}
762 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
763 * @dev: the device to add devfreq feature.
764 * @profile: device-specific profile to run devfreq.
765 * @governor_name: name of the policy to choose frequency.
766 * @data: private data for the governor. The devfreq framework does not
769 * This function manages automatically the memory of devfreq device using device
770 * resource management and simplify the free operation for memory of devfreq
773 struct devfreq *devm_devfreq_add_device(struct device *dev,
774 struct devfreq_dev_profile *profile,
775 const char *governor_name,
778 struct devfreq **ptr, *devfreq;
780 ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
782 return ERR_PTR(-ENOMEM);
784 devfreq = devfreq_add_device(dev, profile, governor_name, data);
785 if (IS_ERR(devfreq)) {
791 devres_add(dev, ptr);
795 EXPORT_SYMBOL(devm_devfreq_add_device);
799 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
800 * @dev - instance to the given device
801 * @index - index into list of devfreq
803 * return the instance of devfreq device
805 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
807 struct device_node *node;
808 struct devfreq *devfreq;
811 return ERR_PTR(-EINVAL);
814 return ERR_PTR(-EINVAL);
816 node = of_parse_phandle(dev->of_node, "devfreq", index);
818 return ERR_PTR(-ENODEV);
820 mutex_lock(&devfreq_list_lock);
821 list_for_each_entry(devfreq, &devfreq_list, node) {
822 if (devfreq->dev.parent
823 && devfreq->dev.parent->of_node == node) {
824 mutex_unlock(&devfreq_list_lock);
829 mutex_unlock(&devfreq_list_lock);
832 return ERR_PTR(-EPROBE_DEFER);
835 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
837 return ERR_PTR(-ENODEV);
839 #endif /* CONFIG_OF */
840 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
843 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
844 * @dev: the device to add devfreq feature.
845 * @devfreq: the devfreq instance to be removed
847 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
849 WARN_ON(devres_release(dev, devm_devfreq_dev_release,
850 devm_devfreq_dev_match, devfreq));
852 EXPORT_SYMBOL(devm_devfreq_remove_device);
855 * devfreq_suspend_device() - Suspend devfreq of a device.
856 * @devfreq: the devfreq instance to be suspended
858 * This function is intended to be called by the pm callbacks
859 * (e.g., runtime_suspend, suspend) of the device driver that
862 int devfreq_suspend_device(struct devfreq *devfreq)
867 if (!devfreq->governor)
870 return devfreq->governor->event_handler(devfreq,
871 DEVFREQ_GOV_SUSPEND, NULL);
873 EXPORT_SYMBOL(devfreq_suspend_device);
876 * devfreq_resume_device() - Resume devfreq of a device.
877 * @devfreq: the devfreq instance to be resumed
879 * This function is intended to be called by the pm callbacks
880 * (e.g., runtime_resume, resume) of the device driver that
883 int devfreq_resume_device(struct devfreq *devfreq)
888 if (!devfreq->governor)
891 return devfreq->governor->event_handler(devfreq,
892 DEVFREQ_GOV_RESUME, NULL);
894 EXPORT_SYMBOL(devfreq_resume_device);
897 * devfreq_add_governor() - Add devfreq governor
898 * @governor: the devfreq governor to be added
900 int devfreq_add_governor(struct devfreq_governor *governor)
902 struct devfreq_governor *g;
903 struct devfreq *devfreq;
907 pr_err("%s: Invalid parameters.\n", __func__);
911 mutex_lock(&devfreq_list_lock);
912 g = find_devfreq_governor(governor->name);
914 pr_err("%s: governor %s already registered\n", __func__,
920 list_add(&governor->node, &devfreq_governor_list);
922 list_for_each_entry(devfreq, &devfreq_list, node) {
924 struct device *dev = devfreq->dev.parent;
926 if (!strncmp(devfreq->governor_name, governor->name,
928 /* The following should never occur */
929 if (devfreq->governor) {
931 "%s: Governor %s already present\n",
932 __func__, devfreq->governor->name);
933 ret = devfreq->governor->event_handler(devfreq,
934 DEVFREQ_GOV_STOP, NULL);
937 "%s: Governor %s stop = %d\n",
939 devfreq->governor->name, ret);
943 devfreq->governor = governor;
944 ret = devfreq->governor->event_handler(devfreq,
945 DEVFREQ_GOV_START, NULL);
947 dev_warn(dev, "%s: Governor %s start=%d\n",
948 __func__, devfreq->governor->name,
955 mutex_unlock(&devfreq_list_lock);
959 EXPORT_SYMBOL(devfreq_add_governor);
962 * devfreq_remove_governor() - Remove devfreq feature from a device.
963 * @governor: the devfreq governor to be removed
965 int devfreq_remove_governor(struct devfreq_governor *governor)
967 struct devfreq_governor *g;
968 struct devfreq *devfreq;
972 pr_err("%s: Invalid parameters.\n", __func__);
976 mutex_lock(&devfreq_list_lock);
977 g = find_devfreq_governor(governor->name);
979 pr_err("%s: governor %s not registered\n", __func__,
984 list_for_each_entry(devfreq, &devfreq_list, node) {
986 struct device *dev = devfreq->dev.parent;
988 if (!strncmp(devfreq->governor_name, governor->name,
990 /* we should have a devfreq governor! */
991 if (!devfreq->governor) {
992 dev_warn(dev, "%s: Governor %s NOT present\n",
993 __func__, governor->name);
997 ret = devfreq->governor->event_handler(devfreq,
998 DEVFREQ_GOV_STOP, NULL);
1000 dev_warn(dev, "%s: Governor %s stop=%d\n",
1001 __func__, devfreq->governor->name,
1004 devfreq->governor = NULL;
1008 list_del(&governor->node);
1010 mutex_unlock(&devfreq_list_lock);
1014 EXPORT_SYMBOL(devfreq_remove_governor);
1016 static ssize_t name_show(struct device *dev,
1017 struct device_attribute *attr, char *buf)
1019 struct devfreq *devfreq = to_devfreq(dev);
1020 return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
1022 static DEVICE_ATTR_RO(name);
1024 static ssize_t governor_show(struct device *dev,
1025 struct device_attribute *attr, char *buf)
1027 if (!to_devfreq(dev)->governor)
1030 return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1033 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1034 const char *buf, size_t count)
1036 struct devfreq *df = to_devfreq(dev);
1038 char str_governor[DEVFREQ_NAME_LEN + 1];
1039 struct devfreq_governor *governor;
1041 ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1045 mutex_lock(&devfreq_list_lock);
1046 governor = try_then_request_governor(str_governor);
1047 if (IS_ERR(governor)) {
1048 ret = PTR_ERR(governor);
1051 if (df->governor == governor) {
1054 } else if ((df->governor && df->governor->immutable) ||
1055 governor->immutable) {
1061 ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1063 dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1064 __func__, df->governor->name, ret);
1068 df->governor = governor;
1069 strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1070 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1072 dev_warn(dev, "%s: Governor %s not started(%d)\n",
1073 __func__, df->governor->name, ret);
1075 mutex_unlock(&devfreq_list_lock);
1081 static DEVICE_ATTR_RW(governor);
1083 static ssize_t available_governors_show(struct device *d,
1084 struct device_attribute *attr,
1087 struct devfreq *df = to_devfreq(d);
1090 mutex_lock(&devfreq_list_lock);
1093 * The devfreq with immutable governor (e.g., passive) shows
1094 * only own governor.
1096 if (df->governor && df->governor->immutable) {
1097 count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1098 "%s ", df->governor_name);
1100 * The devfreq device shows the registered governor except for
1101 * immutable governors such as passive governor .
1104 struct devfreq_governor *governor;
1106 list_for_each_entry(governor, &devfreq_governor_list, node) {
1107 if (governor->immutable)
1109 count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1110 "%s ", governor->name);
1114 mutex_unlock(&devfreq_list_lock);
1116 /* Truncate the trailing space */
1120 count += sprintf(&buf[count], "\n");
1124 static DEVICE_ATTR_RO(available_governors);
1126 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1130 struct devfreq *devfreq = to_devfreq(dev);
1132 if (devfreq->profile->get_cur_freq &&
1133 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1134 return sprintf(buf, "%lu\n", freq);
1136 return sprintf(buf, "%lu\n", devfreq->previous_freq);
1138 static DEVICE_ATTR_RO(cur_freq);
1140 static ssize_t target_freq_show(struct device *dev,
1141 struct device_attribute *attr, char *buf)
1143 return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1145 static DEVICE_ATTR_RO(target_freq);
1147 static ssize_t polling_interval_show(struct device *dev,
1148 struct device_attribute *attr, char *buf)
1150 return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
1153 static ssize_t polling_interval_store(struct device *dev,
1154 struct device_attribute *attr,
1155 const char *buf, size_t count)
1157 struct devfreq *df = to_devfreq(dev);
1164 ret = sscanf(buf, "%u", &value);
1168 df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
1173 static DEVICE_ATTR_RW(polling_interval);
1175 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
1176 const char *buf, size_t count)
1178 struct devfreq *df = to_devfreq(dev);
1179 unsigned long value;
1182 ret = sscanf(buf, "%lu", &value);
1186 mutex_lock(&df->lock);
1189 if (value > df->max_freq) {
1194 unsigned long *freq_table = df->profile->freq_table;
1196 /* Get minimum frequency according to sorting order */
1197 if (freq_table[0] < freq_table[df->profile->max_state - 1])
1198 value = freq_table[0];
1200 value = freq_table[df->profile->max_state - 1];
1203 df->min_freq = value;
1207 mutex_unlock(&df->lock);
1211 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1214 struct devfreq *df = to_devfreq(dev);
1216 return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
1219 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
1220 const char *buf, size_t count)
1222 struct devfreq *df = to_devfreq(dev);
1223 unsigned long value;
1226 ret = sscanf(buf, "%lu", &value);
1230 mutex_lock(&df->lock);
1233 if (value < df->min_freq) {
1238 unsigned long *freq_table = df->profile->freq_table;
1240 /* Get maximum frequency according to sorting order */
1241 if (freq_table[0] < freq_table[df->profile->max_state - 1])
1242 value = freq_table[df->profile->max_state - 1];
1244 value = freq_table[0];
1247 df->max_freq = value;
1251 mutex_unlock(&df->lock);
1254 static DEVICE_ATTR_RW(min_freq);
1256 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1259 struct devfreq *df = to_devfreq(dev);
1261 return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
1263 static DEVICE_ATTR_RW(max_freq);
1265 static ssize_t available_frequencies_show(struct device *d,
1266 struct device_attribute *attr,
1269 struct devfreq *df = to_devfreq(d);
1273 mutex_lock(&df->lock);
1275 for (i = 0; i < df->profile->max_state; i++)
1276 count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1277 "%lu ", df->profile->freq_table[i]);
1279 mutex_unlock(&df->lock);
1280 /* Truncate the trailing space */
1284 count += sprintf(&buf[count], "\n");
1288 static DEVICE_ATTR_RO(available_frequencies);
1290 static ssize_t trans_stat_show(struct device *dev,
1291 struct device_attribute *attr, char *buf)
1293 struct devfreq *devfreq = to_devfreq(dev);
1296 unsigned int max_state = devfreq->profile->max_state;
1299 return sprintf(buf, "Not Supported.\n");
1301 mutex_lock(&devfreq->lock);
1302 if (!devfreq->stop_polling &&
1303 devfreq_update_status(devfreq, devfreq->previous_freq)) {
1304 mutex_unlock(&devfreq->lock);
1307 mutex_unlock(&devfreq->lock);
1309 len = sprintf(buf, " From : To\n");
1310 len += sprintf(buf + len, " :");
1311 for (i = 0; i < max_state; i++)
1312 len += sprintf(buf + len, "%10lu",
1313 devfreq->profile->freq_table[i]);
1315 len += sprintf(buf + len, " time(ms)\n");
1317 for (i = 0; i < max_state; i++) {
1318 if (devfreq->profile->freq_table[i]
1319 == devfreq->previous_freq) {
1320 len += sprintf(buf + len, "*");
1322 len += sprintf(buf + len, " ");
1324 len += sprintf(buf + len, "%10lu:",
1325 devfreq->profile->freq_table[i]);
1326 for (j = 0; j < max_state; j++)
1327 len += sprintf(buf + len, "%10u",
1328 devfreq->trans_table[(i * max_state) + j]);
1329 len += sprintf(buf + len, "%10u\n",
1330 jiffies_to_msecs(devfreq->time_in_state[i]));
1333 len += sprintf(buf + len, "Total transition : %u\n",
1334 devfreq->total_trans);
1337 static DEVICE_ATTR_RO(trans_stat);
1339 static struct attribute *devfreq_attrs[] = {
1340 &dev_attr_name.attr,
1341 &dev_attr_governor.attr,
1342 &dev_attr_available_governors.attr,
1343 &dev_attr_cur_freq.attr,
1344 &dev_attr_available_frequencies.attr,
1345 &dev_attr_target_freq.attr,
1346 &dev_attr_polling_interval.attr,
1347 &dev_attr_min_freq.attr,
1348 &dev_attr_max_freq.attr,
1349 &dev_attr_trans_stat.attr,
1352 ATTRIBUTE_GROUPS(devfreq);
1354 static int __init devfreq_init(void)
1356 devfreq_class = class_create(THIS_MODULE, "devfreq");
1357 if (IS_ERR(devfreq_class)) {
1358 pr_err("%s: couldn't create class\n", __FILE__);
1359 return PTR_ERR(devfreq_class);
1362 devfreq_wq = create_freezable_workqueue("devfreq_wq");
1364 class_destroy(devfreq_class);
1365 pr_err("%s: couldn't create workqueue\n", __FILE__);
1368 devfreq_class->dev_groups = devfreq_groups;
1372 subsys_initcall(devfreq_init);
1375 * The following are helper functions for devfreq user device drivers with
1380 * devfreq_recommended_opp() - Helper function to get proper OPP for the
1381 * freq value given to target callback.
1382 * @dev: The devfreq user device. (parent of devfreq)
1383 * @freq: The frequency given to target function
1384 * @flags: Flags handed from devfreq framework.
1386 * The callers are required to call dev_pm_opp_put() for the returned OPP after
1389 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1390 unsigned long *freq,
1393 struct dev_pm_opp *opp;
1395 if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1396 /* The freq is an upper bound. opp should be lower */
1397 opp = dev_pm_opp_find_freq_floor(dev, freq);
1399 /* If not available, use the closest opp */
1400 if (opp == ERR_PTR(-ERANGE))
1401 opp = dev_pm_opp_find_freq_ceil(dev, freq);
1403 /* The freq is an lower bound. opp should be higher */
1404 opp = dev_pm_opp_find_freq_ceil(dev, freq);
1406 /* If not available, use the closest opp */
1407 if (opp == ERR_PTR(-ERANGE))
1408 opp = dev_pm_opp_find_freq_floor(dev, freq);
1413 EXPORT_SYMBOL(devfreq_recommended_opp);
1416 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1417 * for any changes in the OPP availability
1419 * @dev: The devfreq user device. (parent of devfreq)
1420 * @devfreq: The devfreq object.
1422 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1424 return dev_pm_opp_register_notifier(dev, &devfreq->nb);
1426 EXPORT_SYMBOL(devfreq_register_opp_notifier);
1429 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1430 * notified for any changes in the OPP
1431 * availability changes anymore.
1432 * @dev: The devfreq user device. (parent of devfreq)
1433 * @devfreq: The devfreq object.
1435 * At exit() callback of devfreq_dev_profile, this must be included if
1436 * devfreq_recommended_opp is used.
1438 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1440 return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
1442 EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
/* devres release callback: undoes devm_devfreq_register_opp_notifier(). */
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}
1450 * devm_ devfreq_register_opp_notifier()
1451 * - Resource-managed devfreq_register_opp_notifier()
1452 * @dev: The devfreq user device. (parent of devfreq)
1453 * @devfreq: The devfreq object.
1455 int devm_devfreq_register_opp_notifier(struct device *dev,
1456 struct devfreq *devfreq)
1458 struct devfreq **ptr;
1461 ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1465 ret = devfreq_register_opp_notifier(dev, devfreq);
1472 devres_add(dev, ptr);
1476 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1479 * devm_devfreq_unregister_opp_notifier()
1480 * - Resource-managed devfreq_unregister_opp_notifier()
1481 * @dev: The devfreq user device. (parent of devfreq)
1482 * @devfreq: The devfreq object.
1484 void devm_devfreq_unregister_opp_notifier(struct device *dev,
1485 struct devfreq *devfreq)
1487 WARN_ON(devres_release(dev, devm_devfreq_opp_release,
1488 devm_devfreq_dev_match, devfreq));
1490 EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1493 * devfreq_register_notifier() - Register a driver with devfreq
1494 * @devfreq: The devfreq object.
1495 * @nb: The notifier block to register.
1496 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1498 int devfreq_register_notifier(struct devfreq *devfreq,
1499 struct notifier_block *nb,
1508 case DEVFREQ_TRANSITION_NOTIFIER:
1509 ret = srcu_notifier_chain_register(
1510 &devfreq->transition_notifier_list, nb);
1518 EXPORT_SYMBOL(devfreq_register_notifier);
1521 * devfreq_unregister_notifier() - Unregister a driver with devfreq
1522 * @devfreq: The devfreq object.
1523 * @nb: The notifier block to be unregistered.
1524 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1526 int devfreq_unregister_notifier(struct devfreq *devfreq,
1527 struct notifier_block *nb,
1536 case DEVFREQ_TRANSITION_NOTIFIER:
1537 ret = srcu_notifier_chain_unregister(
1538 &devfreq->transition_notifier_list, nb);
1546 EXPORT_SYMBOL(devfreq_unregister_notifier);
/*
 * Devres payload for devm_devfreq_register_notifier(): records everything
 * needed to replay devfreq_unregister_notifier() at device teardown.
 */
struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;	/* e.g. DEVFREQ_TRANSITION_NOTIFIER */
};
1554 static void devm_devfreq_notifier_release(struct device *dev, void *res)
1556 struct devfreq_notifier_devres *this = res;
1558 devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
1562 * devm_devfreq_register_notifier()
1563 - Resource-managed devfreq_register_notifier()
1564 * @dev: The devfreq user device. (parent of devfreq)
1565 * @devfreq: The devfreq object.
1566 * @nb: The notifier block to be unregistered.
1567 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1569 int devm_devfreq_register_notifier(struct device *dev,
1570 struct devfreq *devfreq,
1571 struct notifier_block *nb,
1574 struct devfreq_notifier_devres *ptr;
1577 ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
1582 ret = devfreq_register_notifier(devfreq, nb, list);
1588 ptr->devfreq = devfreq;
1591 devres_add(dev, ptr);
1595 EXPORT_SYMBOL(devm_devfreq_register_notifier);
1598 * devm_devfreq_unregister_notifier()
1599 - Resource-managed devfreq_unregister_notifier()
1600 * @dev: The devfreq user device. (parent of devfreq)
1601 * @devfreq: The devfreq object.
1602 * @nb: The notifier block to be unregistered.
1603 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1605 void devm_devfreq_unregister_notifier(struct device *dev,
1606 struct devfreq *devfreq,
1607 struct notifier_block *nb,
1610 WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
1611 devm_devfreq_dev_match, devfreq));
1613 EXPORT_SYMBOL(devm_devfreq_unregister_notifier);