1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/drivers/cpufreq/cpufreq.c
5 * Copyright (C) 2001 Russell King
6 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
9 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
10 * Added handling for CPU hotplug
11 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
12 * Fix handling for CPU hotplug -- affected CPUs
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/cpu.h>
18 #include <linux/cpufreq.h>
19 #include <linux/cpu_cooling.h>
20 #include <linux/delay.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/pm_qos.h>
27 #include <linux/slab.h>
28 #include <linux/suspend.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <linux/units.h>
32 #include <trace/events/power.h>
34 static LIST_HEAD(cpufreq_policy_list);
36 /* Macros to iterate over CPU policies */
37 #define for_each_suitable_policy(__policy, __active) \
38 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
39 if ((__active) == !policy_is_inactive(__policy))
41 #define for_each_active_policy(__policy) \
42 for_each_suitable_policy(__policy, true)
43 #define for_each_inactive_policy(__policy) \
44 for_each_suitable_policy(__policy, false)
46 /* Iterate over governors */
47 static LIST_HEAD(cpufreq_governor_list);
48 #define for_each_governor(__governor) \
49 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
51 static char default_governor[CPUFREQ_NAME_LEN];
54 * The "cpufreq driver" - the arch- or hardware-dependent low
55 * level driver of CPUFreq support, and its spinlock. This lock
56 * also protects the cpufreq_cpu_data array.
58 static struct cpufreq_driver *cpufreq_driver;
59 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
60 static DEFINE_RWLOCK(cpufreq_driver_lock);
62 static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
63 bool cpufreq_supports_freq_invariance(void)
65 return static_branch_likely(&cpufreq_freq_invariance);
68 /* Flag to suspend/resume CPUFreq governors */
69 static bool cpufreq_suspended;
71 static inline bool has_target(void)
73 return cpufreq_driver->target_index || cpufreq_driver->target;
76 /* internal prototypes */
77 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
78 static int cpufreq_init_governor(struct cpufreq_policy *policy);
79 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
80 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
81 static int cpufreq_set_policy(struct cpufreq_policy *policy,
82 struct cpufreq_governor *new_gov,
83 unsigned int new_pol);
86 * Two notifier lists: the "policy" list is involved in the
87 * validation process for a new CPU frequency policy; the
88 * "transition" list is for kernel code that needs to handle
89 * changes to devices when the CPU clock speed changes.
90 * Each list is protected by its own locking inside the notifier head.
92 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
93 SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
95 static int off __read_mostly;
96 static int cpufreq_disabled(void)
100 void disable_cpufreq(void)
104 static DEFINE_MUTEX(cpufreq_governor_mutex);
106 bool have_governor_per_policy(void)
108 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
110 EXPORT_SYMBOL_GPL(have_governor_per_policy);
112 static struct kobject *cpufreq_global_kobject;
114 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
116 if (have_governor_per_policy())
117 return &policy->kobj;
119 return cpufreq_global_kobject;
121 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
123 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
125 struct kernel_cpustat kcpustat;
130 cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
132 kcpustat_cpu_fetch(&kcpustat, cpu);
134 busy_time = kcpustat.cpustat[CPUTIME_USER];
135 busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
136 busy_time += kcpustat.cpustat[CPUTIME_IRQ];
137 busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
138 busy_time += kcpustat.cpustat[CPUTIME_STEAL];
139 busy_time += kcpustat.cpustat[CPUTIME_NICE];
141 idle_time = cur_wall_time - busy_time;
143 *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
145 return div_u64(idle_time, NSEC_PER_USEC);
148 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
150 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
152 if (idle_time == -1ULL)
153 return get_cpu_idle_time_jiffy(cpu, wall);
155 idle_time += get_cpu_iowait_time_us(cpu, wall);
159 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
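/*
 * Usage sketch (editorial example, not part of the original file): an
 * ondemand-style sampler can derive the load of a CPU from two snapshots of
 * get_cpu_idle_time(); "io_busy" selects whether iowait counts as busy time.
 * The "prev_*" variables below are hypothetical per-CPU sample state.
 *
 *	u64 wall, idle, d_wall, d_idle;
 *	unsigned int load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *	d_wall = wall - prev_wall;
 *	d_idle = idle - prev_idle;
 *	prev_wall = wall;
 *	prev_idle = idle;
 *	load = d_wall > d_idle ? div64_u64(100 * (d_wall - d_idle), d_wall) : 0;
 */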
162 * This is a generic cpufreq init() routine which can be used by cpufreq
163 * drivers of SMP systems. It will do the following:
164 * - set policy->freq_table to the frequency table passed in
165 * - set the policy's transition latency
166 * - fill policy->cpus with all possible CPUs
168 void cpufreq_generic_init(struct cpufreq_policy *policy,
169 struct cpufreq_frequency_table *table,
170 unsigned int transition_latency)
172 policy->freq_table = table;
173 policy->cpuinfo.transition_latency = transition_latency;
176 * The driver only supports the SMP configuration where all processors
177 * share the clock and voltage.
179 cpumask_setall(policy->cpus);
181 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
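/*
 * Usage sketch (editorial example with a hypothetical driver): a simple
 * clk-based driver can implement its ->init() callback on top of
 * cpufreq_generic_init(); "foo_clk" and "foo_freq_table" are assumed to have
 * been set up by the driver's probe code.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;
 *		cpufreq_generic_init(policy, foo_freq_table, 100000);
 *		return 0;
 *	}
 */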
183 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
185 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
187 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
189 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
191 unsigned int cpufreq_generic_get(unsigned int cpu)
193 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
195 if (!policy || IS_ERR(policy->clk)) {
196 pr_err("%s: No %s associated to cpu: %d\n",
197 __func__, policy ? "clk" : "policy", cpu);
201 return clk_get_rate(policy->clk) / 1000;
203 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
206 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
207 * @cpu: CPU to find the policy for.
209 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
210 * the kobject reference counter of that policy. Return a valid policy on
211 * success or NULL on failure.
213 * The policy returned by this function has to be released with the help of
214 * cpufreq_cpu_put() to balance its kobject reference counter properly.
216 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
218 struct cpufreq_policy *policy = NULL;
221 if (WARN_ON(cpu >= nr_cpu_ids))
224 /* get the cpufreq driver */
225 read_lock_irqsave(&cpufreq_driver_lock, flags);
227 if (cpufreq_driver) {
229 policy = cpufreq_cpu_get_raw(cpu);
231 kobject_get(&policy->kobj);
234 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
238 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
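/*
 * Usage sketch (editorial example): any consumer of cpufreq_cpu_get() must
 * balance it with cpufreq_cpu_put() once it is done with the policy:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u: last known frequency %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */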
241 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
242 * @policy: cpufreq policy returned by cpufreq_cpu_get().
244 void cpufreq_cpu_put(struct cpufreq_policy *policy)
246 kobject_put(&policy->kobj);
248 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
251 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
252 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
254 void cpufreq_cpu_release(struct cpufreq_policy *policy)
256 if (WARN_ON(!policy))
259 lockdep_assert_held(&policy->rwsem);
261 up_write(&policy->rwsem);
263 cpufreq_cpu_put(policy);
267 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
268 * @cpu: CPU to find the policy for.
270 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
271 * if the policy returned by it is not NULL, acquire its rwsem for writing.
272 * Return the policy if it is active or release it and return NULL otherwise.
274 * The policy returned by this function has to be released with the help of
275 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
278 struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
280 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
285 down_write(&policy->rwsem);
287 if (policy_is_inactive(policy)) {
288 cpufreq_cpu_release(policy);
295 /*********************************************************************
296 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
297 *********************************************************************/
300 * adjust_jiffies - Adjust the system "loops_per_jiffy".
301 * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
302 * @ci: Frequency change information.
304 * This function alters the system "loops_per_jiffy" for the clock
305 * speed change. Note that loops_per_jiffy cannot be updated on SMP
306 * systems as each CPU might be scaled differently. So, use the arch
307 * per-CPU loops_per_jiffy value wherever possible.
309 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
312 static unsigned long l_p_j_ref;
313 static unsigned int l_p_j_ref_freq;
315 if (ci->flags & CPUFREQ_CONST_LOOPS)
318 if (!l_p_j_ref_freq) {
319 l_p_j_ref = loops_per_jiffy;
320 l_p_j_ref_freq = ci->old;
321 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
322 l_p_j_ref, l_p_j_ref_freq);
324 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
325 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
327 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
328 loops_per_jiffy, ci->new);
334 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
335 * @policy: cpufreq policy whose frequency is changing.
336 * @freqs: details of the frequency update.
337 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
339 * This function calls the transition notifiers and adjust_jiffies().
341 * It is called twice on all CPU frequency changes that have external effects.
343 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
344 struct cpufreq_freqs *freqs,
349 BUG_ON(irqs_disabled());
351 if (cpufreq_disabled())
354 freqs->policy = policy;
355 freqs->flags = cpufreq_driver->flags;
356 pr_debug("notification %u of frequency transition to %u kHz\n",
360 case CPUFREQ_PRECHANGE:
362 * Detect if the driver reported a value as "old frequency"
363 * which is not equal to what the cpufreq core thinks is really the current frequency.
366 if (policy->cur && policy->cur != freqs->old) {
367 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
368 freqs->old, policy->cur);
369 freqs->old = policy->cur;
372 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
373 CPUFREQ_PRECHANGE, freqs);
375 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
378 case CPUFREQ_POSTCHANGE:
379 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
380 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
381 cpumask_pr_args(policy->cpus));
383 for_each_cpu(cpu, policy->cpus)
384 trace_cpu_frequency(freqs->new, cpu);
386 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
387 CPUFREQ_POSTCHANGE, freqs);
389 cpufreq_stats_record_transition(policy, freqs->new);
390 policy->cur = freqs->new;
394 /* Do post notifications when there is a chance that the transition has failed */
395 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
396 struct cpufreq_freqs *freqs, int transition_failed)
398 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
399 if (!transition_failed)
402 swap(freqs->old, freqs->new);
403 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
404 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
407 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
408 struct cpufreq_freqs *freqs)
412 * Catch double invocations of _begin() which lead to self-deadlock.
413 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
414 * doesn't invoke _begin() on their behalf, and hence the chances of
415 * double invocations are very low. Moreover, there are scenarios
416 * where these checks can emit false-positive warnings in these
417 * drivers; so we avoid that by skipping them altogether.
419 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
420 && current == policy->transition_task);
423 wait_event(policy->transition_wait, !policy->transition_ongoing);
425 spin_lock(&policy->transition_lock);
427 if (unlikely(policy->transition_ongoing)) {
428 spin_unlock(&policy->transition_lock);
432 policy->transition_ongoing = true;
433 policy->transition_task = current;
435 spin_unlock(&policy->transition_lock);
437 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
439 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
441 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
442 struct cpufreq_freqs *freqs, int transition_failed)
444 if (WARN_ON(!policy->transition_ongoing))
447 cpufreq_notify_post_transition(policy, freqs, transition_failed);
449 arch_set_freq_scale(policy->related_cpus,
451 policy->cpuinfo.max_freq);
453 policy->transition_ongoing = false;
454 policy->transition_task = NULL;
456 wake_up(&policy->transition_wait);
458 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
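/*
 * Usage sketch (editorial example, hypothetical driver): drivers that
 * implement ->target() themselves bracket the actual hardware programming
 * with a begin/end pair so that notifiers, stats and loops_per_jiffy stay in
 * sync; "foo_write_pll()" stands in for the platform-specific frequency write.
 *
 *	static int foo_target(struct cpufreq_policy *policy,
 *			      unsigned int target_freq, unsigned int relation)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = target_freq,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = foo_write_pll(target_freq);
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */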
461 * Fast frequency switching status count. Positive means "enabled", negative
462 * means "disabled" and 0 means "not decided yet".
464 static int cpufreq_fast_switch_count;
465 static DEFINE_MUTEX(cpufreq_fast_switch_lock);
467 static void cpufreq_list_transition_notifiers(void)
469 struct notifier_block *nb;
471 pr_info("Registered transition notifiers:\n");
473 mutex_lock(&cpufreq_transition_notifier_list.mutex);
475 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
476 pr_info("%pS\n", nb->notifier_call);
478 mutex_unlock(&cpufreq_transition_notifier_list.mutex);
482 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
483 * @policy: cpufreq policy to enable fast frequency switching for.
485 * Try to enable fast frequency switching for @policy.
487 * The attempt will fail if there is at least one transition notifier registered
488 * at this point, as fast frequency switching is quite fundamentally at odds
489 * with transition notifiers. Thus if successful, it will make registration of
490 * transition notifiers fail going forward.
492 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
494 lockdep_assert_held(&policy->rwsem);
496 if (!policy->fast_switch_possible)
499 mutex_lock(&cpufreq_fast_switch_lock);
500 if (cpufreq_fast_switch_count >= 0) {
501 cpufreq_fast_switch_count++;
502 policy->fast_switch_enabled = true;
504 pr_warn("CPU%u: Fast frequency switching not enabled\n",
506 cpufreq_list_transition_notifiers();
508 mutex_unlock(&cpufreq_fast_switch_lock);
510 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
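/*
 * Usage sketch (editorial example): fast switching is only possible when the
 * driver advertises it and provides ->fast_switch(); a governor such as
 * schedutil then opts in from its own ->init() while policy->rwsem is held:
 *
 *	// In the driver's ->init():
 *	policy->fast_switch_possible = true;
 *
 *	// In the governor's ->init() (policy->rwsem held by the core):
 *	cpufreq_enable_fast_switch(policy);
 */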
513 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
514 * @policy: cpufreq policy to disable fast frequency switching for.
516 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
518 mutex_lock(&cpufreq_fast_switch_lock);
519 if (policy->fast_switch_enabled) {
520 policy->fast_switch_enabled = false;
521 if (!WARN_ON(cpufreq_fast_switch_count <= 0))
522 cpufreq_fast_switch_count--;
524 mutex_unlock(&cpufreq_fast_switch_lock);
526 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
528 static unsigned int __resolve_freq(struct cpufreq_policy *policy,
529 unsigned int target_freq, unsigned int relation)
533 target_freq = clamp_val(target_freq, policy->min, policy->max);
535 if (!cpufreq_driver->target_index)
538 idx = cpufreq_frequency_table_target(policy, target_freq, relation);
539 policy->cached_resolved_idx = idx;
540 policy->cached_target_freq = target_freq;
541 return policy->freq_table[idx].frequency;
545 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported frequency.
547 * @policy: associated policy to interrogate
548 * @target_freq: target frequency to resolve.
550 * The target to driver frequency mapping is cached in the policy.
552 * Return: Lowest driver-supported frequency greater than or equal to the
553 * given target_freq, subject to policy (min/max) and driver limitations.
555 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
556 unsigned int target_freq)
558 return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
560 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
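/*
 * Usage sketch (editorial example): a governor can map the frequency it has
 * computed onto one the driver can actually program before acting on it;
 * "raw_freq" is a hypothetical value derived from the current utilization.
 *
 *	unsigned int next_freq = cpufreq_driver_resolve_freq(policy, raw_freq);
 */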
562 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
564 unsigned int latency;
566 if (policy->transition_delay_us)
567 return policy->transition_delay_us;
569 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
572 * For platforms that can change the frequency very fast (< 10
573 * us), the above formula gives a decent transition delay. But
574 * for platforms where transition_latency is in milliseconds, it
575 * ends up giving unrealistic values.
577 * Cap the default transition delay to 10 ms, which seems to be
578 * a reasonable amount of time after which we should reevaluate the frequency.
581 return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
584 return LATENCY_MULTIPLIER;
586 EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
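/*
 * Worked example (editorial note, assuming LATENCY_MULTIPLIER is 1000): a
 * cpuinfo.transition_latency of 3000 ns gives latency = 3 us and a default
 * transition delay of 3 * 1000 = 3000 us, whereas a 1 ms transition latency
 * would give 1000 * 1000 us and is therefore capped at 10000 us (10 ms).
 */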
588 /*********************************************************************
590 *********************************************************************/
591 static ssize_t show_boost(struct kobject *kobj,
592 struct kobj_attribute *attr, char *buf)
594 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
597 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
598 const char *buf, size_t count)
602 ret = sscanf(buf, "%d", &enable);
603 if (ret != 1 || enable < 0 || enable > 1)
606 if (cpufreq_boost_trigger_state(enable)) {
607 pr_err("%s: Cannot %s BOOST!\n",
608 __func__, enable ? "enable" : "disable");
612 pr_debug("%s: cpufreq BOOST %s\n",
613 __func__, enable ? "enabled" : "disabled");
617 define_one_global_rw(boost);
619 static struct cpufreq_governor *find_governor(const char *str_governor)
621 struct cpufreq_governor *t;
624 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
630 static struct cpufreq_governor *get_governor(const char *str_governor)
632 struct cpufreq_governor *t;
634 mutex_lock(&cpufreq_governor_mutex);
635 t = find_governor(str_governor);
639 if (!try_module_get(t->owner))
643 mutex_unlock(&cpufreq_governor_mutex);
648 static unsigned int cpufreq_parse_policy(char *str_governor)
650 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
651 return CPUFREQ_POLICY_PERFORMANCE;
653 if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
654 return CPUFREQ_POLICY_POWERSAVE;
656 return CPUFREQ_POLICY_UNKNOWN;
660 * cpufreq_parse_governor - parse a governor string only for has_target()
661 * @str_governor: Governor name.
663 static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
665 struct cpufreq_governor *t;
667 t = get_governor(str_governor);
671 if (request_module("cpufreq_%s", str_governor))
674 return get_governor(str_governor);
678 * cpufreq_per_cpu_attr_read() / show_##file_name() -
679 * print out cpufreq information
681 * Write out information from cpufreq_driver->policy[cpu]; object must be "unsigned int".
685 #define show_one(file_name, object) \
686 static ssize_t show_##file_name \
687 (struct cpufreq_policy *policy, char *buf) \
689 return sprintf(buf, "%u\n", policy->object); \
692 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
693 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
694 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
695 show_one(scaling_min_freq, min);
696 show_one(scaling_max_freq, max);
698 __weak unsigned int arch_freq_get_on_cpu(int cpu)
703 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
708 freq = arch_freq_get_on_cpu(policy->cpu);
710 ret = sprintf(buf, "%u\n", freq);
711 else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
712 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
714 ret = sprintf(buf, "%u\n", policy->cur);
719 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
721 #define store_one(file_name, object) \
722 static ssize_t store_##file_name \
723 (struct cpufreq_policy *policy, const char *buf, size_t count) \
728 ret = sscanf(buf, "%lu", &val); \
732 ret = freq_qos_update_request(policy->object##_freq_req, val);\
733 return ret >= 0 ? count : ret; \
736 store_one(scaling_min_freq, min);
737 store_one(scaling_max_freq, max);
740 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
742 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
745 unsigned int cur_freq = __cpufreq_get(policy);
748 return sprintf(buf, "%u\n", cur_freq);
750 return sprintf(buf, "<unknown>\n");
754 * show_scaling_governor - show the current policy for the specified CPU
756 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
758 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
759 return sprintf(buf, "powersave\n");
760 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
761 return sprintf(buf, "performance\n");
762 else if (policy->governor)
763 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
764 policy->governor->name);
769 * store_scaling_governor - store policy for the specified CPU
771 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
772 const char *buf, size_t count)
774 char str_governor[16];
777 ret = sscanf(buf, "%15s", str_governor);
781 if (cpufreq_driver->setpolicy) {
782 unsigned int new_pol;
784 new_pol = cpufreq_parse_policy(str_governor);
788 ret = cpufreq_set_policy(policy, NULL, new_pol);
790 struct cpufreq_governor *new_gov;
792 new_gov = cpufreq_parse_governor(str_governor);
796 ret = cpufreq_set_policy(policy, new_gov,
797 CPUFREQ_POLICY_UNKNOWN);
799 module_put(new_gov->owner);
802 return ret ? ret : count;
806 * show_scaling_driver - show the cpufreq driver currently loaded
808 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
810 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
814 * show_scaling_available_governors - show the available CPUfreq governors
816 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
820 struct cpufreq_governor *t;
823 i += sprintf(buf, "performance powersave");
827 mutex_lock(&cpufreq_governor_mutex);
828 for_each_governor(t) {
829 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
830 - (CPUFREQ_NAME_LEN + 2)))
832 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
834 mutex_unlock(&cpufreq_governor_mutex);
836 i += sprintf(&buf[i], "\n");
840 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
845 for_each_cpu(cpu, mask) {
847 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
848 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
849 if (i >= (PAGE_SIZE - 5))
852 i += sprintf(&buf[i], "\n");
855 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
858 * show_related_cpus - show the CPUs affected by each transition even if
859 * hw coordination is in use
861 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
863 return cpufreq_show_cpus(policy->related_cpus, buf);
867 * show_affected_cpus - show the CPUs affected by each transition
869 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
871 return cpufreq_show_cpus(policy->cpus, buf);
874 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
875 const char *buf, size_t count)
877 unsigned int freq = 0;
880 if (!policy->governor || !policy->governor->store_setspeed)
883 ret = sscanf(buf, "%u", &freq);
887 policy->governor->store_setspeed(policy, freq);
892 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
894 if (!policy->governor || !policy->governor->show_setspeed)
895 return sprintf(buf, "<unsupported>\n");
897 return policy->governor->show_setspeed(policy, buf);
901 * show_bios_limit - show the current cpufreq HW/BIOS limitation
903 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
907 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
909 return sprintf(buf, "%u\n", limit);
910 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
913 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
914 cpufreq_freq_attr_ro(cpuinfo_min_freq);
915 cpufreq_freq_attr_ro(cpuinfo_max_freq);
916 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
917 cpufreq_freq_attr_ro(scaling_available_governors);
918 cpufreq_freq_attr_ro(scaling_driver);
919 cpufreq_freq_attr_ro(scaling_cur_freq);
920 cpufreq_freq_attr_ro(bios_limit);
921 cpufreq_freq_attr_ro(related_cpus);
922 cpufreq_freq_attr_ro(affected_cpus);
923 cpufreq_freq_attr_rw(scaling_min_freq);
924 cpufreq_freq_attr_rw(scaling_max_freq);
925 cpufreq_freq_attr_rw(scaling_governor);
926 cpufreq_freq_attr_rw(scaling_setspeed);
928 static struct attribute *cpufreq_attrs[] = {
929 &cpuinfo_min_freq.attr,
930 &cpuinfo_max_freq.attr,
931 &cpuinfo_transition_latency.attr,
932 &scaling_min_freq.attr,
933 &scaling_max_freq.attr,
936 &scaling_governor.attr,
937 &scaling_driver.attr,
938 &scaling_available_governors.attr,
939 &scaling_setspeed.attr,
942 ATTRIBUTE_GROUPS(cpufreq);
944 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
945 #define to_attr(a) container_of(a, struct freq_attr, attr)
947 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
949 struct cpufreq_policy *policy = to_policy(kobj);
950 struct freq_attr *fattr = to_attr(attr);
951 ssize_t ret = -EBUSY;
956 down_read(&policy->rwsem);
957 if (likely(!policy_is_inactive(policy)))
958 ret = fattr->show(policy, buf);
959 up_read(&policy->rwsem);
964 static ssize_t store(struct kobject *kobj, struct attribute *attr,
965 const char *buf, size_t count)
967 struct cpufreq_policy *policy = to_policy(kobj);
968 struct freq_attr *fattr = to_attr(attr);
969 ssize_t ret = -EBUSY;
975 * cpus_read_trylock() is used here to work around a circular lock
976 * dependency problem with respect to cpufreq_register_driver().
978 if (!cpus_read_trylock())
981 if (cpu_online(policy->cpu)) {
982 down_write(&policy->rwsem);
983 if (likely(!policy_is_inactive(policy)))
984 ret = fattr->store(policy, buf, count);
985 up_write(&policy->rwsem);
993 static void cpufreq_sysfs_release(struct kobject *kobj)
995 struct cpufreq_policy *policy = to_policy(kobj);
996 pr_debug("last reference is dropped\n");
997 complete(&policy->kobj_unregister);
1000 static const struct sysfs_ops sysfs_ops = {
1005 static struct kobj_type ktype_cpufreq = {
1006 .sysfs_ops = &sysfs_ops,
1007 .default_groups = cpufreq_groups,
1008 .release = cpufreq_sysfs_release,
1011 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1017 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1020 dev_dbg(dev, "%s: Adding symlink\n", __func__);
1021 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1022 dev_err(dev, "cpufreq symlink creation failed\n");
1025 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
1028 dev_dbg(dev, "%s: Removing symlink\n", __func__);
1029 sysfs_remove_link(&dev->kobj, "cpufreq");
1030 cpumask_clear_cpu(cpu, policy->real_cpus);
1033 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1035 struct freq_attr **drv_attr;
1038 /* set up files for this cpu device */
1039 drv_attr = cpufreq_driver->attr;
1040 while (drv_attr && *drv_attr) {
1041 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1046 if (cpufreq_driver->get) {
1047 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1052 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1056 if (cpufreq_driver->bios_limit) {
1057 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1065 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1067 struct cpufreq_governor *gov = NULL;
1068 unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1072 /* Update policy governor to the one used before hotplug. */
1073 gov = get_governor(policy->last_governor);
1075 pr_debug("Restoring governor %s for cpu %d\n",
1076 gov->name, policy->cpu);
1078 gov = get_governor(default_governor);
1082 gov = cpufreq_default_governor();
1083 __module_get(gov->owner);
1088 /* Use the default policy if there is no last_policy. */
1089 if (policy->last_policy) {
1090 pol = policy->last_policy;
1092 pol = cpufreq_parse_policy(default_governor);
1094 * In case the default governor is neither "performance"
1095 * nor "powersave", fall back to the initial policy
1096 * value set by the driver.
1098 if (pol == CPUFREQ_POLICY_UNKNOWN)
1099 pol = policy->policy;
1101 if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1102 pol != CPUFREQ_POLICY_POWERSAVE)
1106 ret = cpufreq_set_policy(policy, gov, pol);
1108 module_put(gov->owner);
1113 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1117 /* Has this CPU been taken care of already? */
1118 if (cpumask_test_cpu(cpu, policy->cpus))
1121 down_write(&policy->rwsem);
1123 cpufreq_stop_governor(policy);
1125 cpumask_set_cpu(cpu, policy->cpus);
1128 ret = cpufreq_start_governor(policy);
1130 pr_err("%s: Failed to start governor\n", __func__);
1132 up_write(&policy->rwsem);
1136 void refresh_frequency_limits(struct cpufreq_policy *policy)
1138 if (!policy_is_inactive(policy)) {
1139 pr_debug("updating policy for CPU %u\n", policy->cpu);
1141 cpufreq_set_policy(policy, policy->governor, policy->policy);
1144 EXPORT_SYMBOL(refresh_frequency_limits);
1146 static void handle_update(struct work_struct *work)
1148 struct cpufreq_policy *policy =
1149 container_of(work, struct cpufreq_policy, update);
1151 pr_debug("handle_update for cpu %u called\n", policy->cpu);
1152 down_write(&policy->rwsem);
1153 refresh_frequency_limits(policy);
1154 up_write(&policy->rwsem);
1157 static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1160 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1162 schedule_work(&policy->update);
1166 static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1169 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1171 schedule_work(&policy->update);
1175 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1177 struct kobject *kobj;
1178 struct completion *cmp;
1180 down_write(&policy->rwsem);
1181 cpufreq_stats_free_table(policy);
1182 kobj = &policy->kobj;
1183 cmp = &policy->kobj_unregister;
1184 up_write(&policy->rwsem);
1188 * We need to make sure that the underlying kobj is
1189 * actually not referenced anymore by anybody before we
1190 * proceed with unloading.
1192 pr_debug("waiting for dropping of refcount\n");
1193 wait_for_completion(cmp);
1194 pr_debug("wait complete\n");
1197 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1199 struct cpufreq_policy *policy;
1200 struct device *dev = get_cpu_device(cpu);
1206 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1210 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1211 goto err_free_policy;
1213 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1214 goto err_free_cpumask;
1216 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1217 goto err_free_rcpumask;
1219 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1220 cpufreq_global_kobject, "policy%u", cpu);
1222 dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1224 * The entire policy object will be freed below, but the extra
1225 * memory allocated for the kobject name needs to be freed by
1226 * releasing the kobject.
1228 kobject_put(&policy->kobj);
1229 goto err_free_real_cpus;
1232 freq_constraints_init(&policy->constraints);
1234 policy->nb_min.notifier_call = cpufreq_notifier_min;
1235 policy->nb_max.notifier_call = cpufreq_notifier_max;
1237 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1240 dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
1241 ret, cpumask_pr_args(policy->cpus));
1242 goto err_kobj_remove;
1245 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1248 dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
1249 ret, cpumask_pr_args(policy->cpus));
1250 goto err_min_qos_notifier;
1253 INIT_LIST_HEAD(&policy->policy_list);
1254 init_rwsem(&policy->rwsem);
1255 spin_lock_init(&policy->transition_lock);
1256 init_waitqueue_head(&policy->transition_wait);
1257 init_completion(&policy->kobj_unregister);
1258 INIT_WORK(&policy->update, handle_update);
1263 err_min_qos_notifier:
1264 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1267 cpufreq_policy_put_kobj(policy);
1269 free_cpumask_var(policy->real_cpus);
1271 free_cpumask_var(policy->related_cpus);
1273 free_cpumask_var(policy->cpus);
1280 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1282 unsigned long flags;
1285 /* Remove policy from list */
1286 write_lock_irqsave(&cpufreq_driver_lock, flags);
1287 list_del(&policy->policy_list);
1289 for_each_cpu(cpu, policy->related_cpus)
1290 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1291 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1293 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1295 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1298 /* Cancel any pending policy->update work before freeing the policy. */
1299 cancel_work_sync(&policy->update);
1301 if (policy->max_freq_req) {
1303 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
1304 * notification, since CPUFREQ_CREATE_POLICY notification was
1305 * sent after adding max_freq_req earlier.
1307 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1308 CPUFREQ_REMOVE_POLICY, policy);
1309 freq_qos_remove_request(policy->max_freq_req);
1312 freq_qos_remove_request(policy->min_freq_req);
1313 kfree(policy->min_freq_req);
1315 cpufreq_policy_put_kobj(policy);
1316 free_cpumask_var(policy->real_cpus);
1317 free_cpumask_var(policy->related_cpus);
1318 free_cpumask_var(policy->cpus);
1322 static int cpufreq_online(unsigned int cpu)
1324 struct cpufreq_policy *policy;
1326 unsigned long flags;
1330 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1332 /* Check if this CPU already has a policy to manage it */
1333 policy = per_cpu(cpufreq_cpu_data, cpu);
1335 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1336 if (!policy_is_inactive(policy))
1337 return cpufreq_add_policy_cpu(policy, cpu);
1339 /* This is the only online CPU for the policy. Start over. */
1341 down_write(&policy->rwsem);
1343 policy->governor = NULL;
1346 policy = cpufreq_policy_alloc(cpu);
1349 down_write(&policy->rwsem);
1352 if (!new_policy && cpufreq_driver->online) {
1353 ret = cpufreq_driver->online(policy);
1355 pr_debug("%s: %d: initialization failed\n", __func__,
1357 goto out_exit_policy;
1360 /* Recover policy->cpus using related_cpus */
1361 cpumask_copy(policy->cpus, policy->related_cpus);
1363 cpumask_copy(policy->cpus, cpumask_of(cpu));
1366 * Call the driver. From then on the cpufreq driver must be able
1367 * to accept all calls to ->verify and ->setpolicy for this CPU.
1369 ret = cpufreq_driver->init(policy);
1371 pr_debug("%s: %d: initialization failed\n", __func__,
1373 goto out_free_policy;
1377 * The initialization has succeeded and the policy is online.
1378 * If there is a problem with its frequency table, take it
1379 * offline and drop it.
1381 ret = cpufreq_table_validate_and_sort(policy);
1383 goto out_offline_policy;
1385 /* related_cpus should at least include policy->cpus. */
1386 cpumask_copy(policy->related_cpus, policy->cpus);
1390 * The affected CPUs must always be the ones that are online. We aren't
1391 * managing offline CPUs here.
1393 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1396 for_each_cpu(j, policy->related_cpus) {
1397 per_cpu(cpufreq_cpu_data, j) = policy;
1398 add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1401 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1403 if (!policy->min_freq_req) {
1405 goto out_destroy_policy;
1408 ret = freq_qos_add_request(&policy->constraints,
1409 policy->min_freq_req, FREQ_QOS_MIN,
1410 FREQ_QOS_MIN_DEFAULT_VALUE);
1413 * So we don't call freq_qos_remove_request() for an
1414 * uninitialized request.
1416 kfree(policy->min_freq_req);
1417 policy->min_freq_req = NULL;
1418 goto out_destroy_policy;
1422 * This must be initialized right here to avoid calling
1423 * freq_qos_remove_request() on an uninitialized request in case of errors.
1426 policy->max_freq_req = policy->min_freq_req + 1;
1428 ret = freq_qos_add_request(&policy->constraints,
1429 policy->max_freq_req, FREQ_QOS_MAX,
1430 FREQ_QOS_MAX_DEFAULT_VALUE);
1432 policy->max_freq_req = NULL;
1433 goto out_destroy_policy;
1436 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1437 CPUFREQ_CREATE_POLICY, policy);
1440 if (cpufreq_driver->get && has_target()) {
1441 policy->cur = cpufreq_driver->get(policy->cpu);
1444 pr_err("%s: ->get() failed\n", __func__);
1445 goto out_destroy_policy;
1450 * Sometimes boot loaders set the CPU frequency to a value outside of the
1451 * frequency table known to the cpufreq core. In such cases the CPU might
1452 * be unstable if it has to run at that frequency for a long time, so it is
1453 * better to set it to a frequency which is listed in the frequency table.
1454 * Running at an unlisted frequency also makes cpufreq stats inconsistent,
1455 * as cpufreq-stats would fail to register because the current frequency of
1456 * the CPU isn't found in the frequency table.
1458 * Because we don't want this change to affect the boot process badly, we
1459 * go for the next frequency which is >= policy->cur ('cur' must be set by
1460 * now, otherwise we would end up setting the frequency to the lowest entry
1461 * of the table, as 'cur' is initialized to zero).
1463 * We are passing the target frequency as "policy->cur - 1", because
1464 * otherwise __cpufreq_driver_target() would simply fail, as policy->cur
1465 * will be equal to the target frequency.
1467 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1469 unsigned int old_freq = policy->cur;
1471 /* Are we running at an unknown frequency? */
1472 ret = cpufreq_frequency_table_get_index(policy, old_freq);
1473 if (ret == -EINVAL) {
1474 ret = __cpufreq_driver_target(policy, old_freq - 1,
1475 CPUFREQ_RELATION_L);
1478 * Reaching this point a few seconds after boot does not
1479 * mean that the system will remain stable at the "unknown"
1480 * frequency for a longer duration. Hence, a BUG_ON().
1483 pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
1484 __func__, policy->cpu, old_freq, policy->cur);
1489 ret = cpufreq_add_dev_interface(policy);
1491 goto out_destroy_policy;
1493 cpufreq_stats_create_table(policy);
1495 write_lock_irqsave(&cpufreq_driver_lock, flags);
1496 list_add(&policy->policy_list, &cpufreq_policy_list);
1497 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1500 * Register with the energy model before
1501 * sched_cpufreq_governor_change() is called, which will result
1502 * in rebuilding of the sched domains, which should only be done
1503 * once the energy model is properly initialized for the policy
1506 * Also, this should be called before the policy is registered
1507 * with cooling framework.
1509 if (cpufreq_driver->register_em)
1510 cpufreq_driver->register_em(policy);
1513 ret = cpufreq_init_policy(policy);
1515 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1516 __func__, cpu, ret);
1517 goto out_destroy_policy;
1520 up_write(&policy->rwsem);
1522 kobject_uevent(&policy->kobj, KOBJ_ADD);
1524 /* Callback for handling stuff after policy is ready */
1525 if (cpufreq_driver->ready)
1526 cpufreq_driver->ready(policy);
1528 if (cpufreq_thermal_control_enabled(cpufreq_driver))
1529 policy->cdev = of_cpufreq_cooling_register(policy);
1531 pr_debug("initialization complete\n");
1536 for_each_cpu(j, policy->real_cpus)
1537 remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
1539 cpumask_clear(policy->cpus);
1542 if (cpufreq_driver->offline)
1543 cpufreq_driver->offline(policy);
1546 if (cpufreq_driver->exit)
1547 cpufreq_driver->exit(policy);
1550 up_write(&policy->rwsem);
1552 cpufreq_policy_free(policy);
1557 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1559 * @sif: Subsystem interface structure pointer (not used)
1561 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1563 struct cpufreq_policy *policy;
1564 unsigned cpu = dev->id;
1567 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1569 if (cpu_online(cpu)) {
1570 ret = cpufreq_online(cpu);
1575 /* Create sysfs link on CPU registration */
1576 policy = per_cpu(cpufreq_cpu_data, cpu);
1578 add_cpu_dev_symlink(policy, cpu, dev);
1583 static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
1588 cpufreq_stop_governor(policy);
1590 cpumask_clear_cpu(cpu, policy->cpus);
1592 if (!policy_is_inactive(policy)) {
1593 /* Nominate a new CPU if necessary. */
1594 if (cpu == policy->cpu)
1595 policy->cpu = cpumask_any(policy->cpus);
1597 /* Start the governor again for the active policy. */
1599 ret = cpufreq_start_governor(policy);
1601 pr_err("%s: Failed to start governor\n", __func__);
1608 strncpy(policy->last_governor, policy->governor->name,
1611 policy->last_policy = policy->policy;
1613 if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1614 cpufreq_cooling_unregister(policy->cdev);
1615 policy->cdev = NULL;
1619 cpufreq_exit_governor(policy);
1622 * Perform the ->offline() during light-weight tear-down, as
1623 * that allows fast recovery when the CPU comes back.
1625 if (cpufreq_driver->offline) {
1626 cpufreq_driver->offline(policy);
1627 } else if (cpufreq_driver->exit) {
1628 cpufreq_driver->exit(policy);
1629 policy->freq_table = NULL;
1633 static int cpufreq_offline(unsigned int cpu)
1635 struct cpufreq_policy *policy;
1637 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1639 policy = cpufreq_cpu_get_raw(cpu);
1641 pr_debug("%s: No cpu_data found\n", __func__);
1645 down_write(&policy->rwsem);
1647 __cpufreq_offline(cpu, policy);
1649 up_write(&policy->rwsem);
1654 * cpufreq_remove_dev - remove a CPU device
1656 * Removes the cpufreq interface for a CPU device.
1658 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1660 unsigned int cpu = dev->id;
1661 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1666 down_write(&policy->rwsem);
1668 if (cpu_online(cpu))
1669 __cpufreq_offline(cpu, policy);
1671 remove_cpu_dev_symlink(policy, cpu, dev);
1673 if (!cpumask_empty(policy->real_cpus)) {
1674 up_write(&policy->rwsem);
1678 /* We did light-weight exit earlier, do full tear down now */
1679 if (cpufreq_driver->offline)
1680 cpufreq_driver->exit(policy);
1682 up_write(&policy->rwsem);
1684 cpufreq_policy_free(policy);
1688 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1689 * @policy: Policy managing CPUs.
1690 * @new_freq: New CPU frequency.
1692 * Adjust to the current frequency first and clean up later by either calling
1693 * cpufreq_update_policy(), or scheduling handle_update().
1695 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1696 unsigned int new_freq)
1698 struct cpufreq_freqs freqs;
1700 pr_debug("Warning: CPU frequency out of sync: cpufreq core thought it was %u kHz, but it is %u kHz\n",
1701 policy->cur, new_freq);
1703 freqs.old = policy->cur;
1704 freqs.new = new_freq;
1706 cpufreq_freq_transition_begin(policy, &freqs);
1707 cpufreq_freq_transition_end(policy, &freqs, 0);
1710 static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1712 unsigned int new_freq;
1714 new_freq = cpufreq_driver->get(policy->cpu);
1719 * If fast frequency switching is used with the given policy, the check
1720 * against policy->cur is pointless, so skip it in that case.
1722 if (policy->fast_switch_enabled || !has_target())
1725 if (policy->cur != new_freq) {
1727 * For some platforms, the frequency returned by hardware may be
1728 * slightly different from what is provided in the frequency
1729 * table, for example hardware may return 499 MHz instead of 500
1730 * MHz. In such cases it is better to avoid getting into
1731 * unnecessary frequency updates.
1733 if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
1736 cpufreq_out_of_sync(policy, new_freq);
1738 schedule_work(&policy->update);
1745 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1748 * This is the last known freq, without actually getting it from the driver.
1749 * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1751 unsigned int cpufreq_quick_get(unsigned int cpu)
1753 struct cpufreq_policy *policy;
1754 unsigned int ret_freq = 0;
1755 unsigned long flags;
1757 read_lock_irqsave(&cpufreq_driver_lock, flags);
1759 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1760 ret_freq = cpufreq_driver->get(cpu);
1761 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1765 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1767 policy = cpufreq_cpu_get(cpu);
1769 ret_freq = policy->cur;
1770 cpufreq_cpu_put(policy);
1775 EXPORT_SYMBOL(cpufreq_quick_get);
1778 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1781 * Just return the max possible frequency for a given CPU.
1783 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1785 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1786 unsigned int ret_freq = 0;
1789 ret_freq = policy->max;
1790 cpufreq_cpu_put(policy);
1795 EXPORT_SYMBOL(cpufreq_quick_get_max);
1798 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1801 * The default return value is the max_freq field of cpuinfo.
1803 __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1805 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1806 unsigned int ret_freq = 0;
1809 ret_freq = policy->cpuinfo.max_freq;
1810 cpufreq_cpu_put(policy);
1815 EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1817 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1819 if (unlikely(policy_is_inactive(policy)))
1822 return cpufreq_verify_current_freq(policy, true);
1826 * cpufreq_get - get the current CPU frequency (in kHz)
1829 * Get the current frequency of the CPU.
1831 unsigned int cpufreq_get(unsigned int cpu)
1833 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1834 unsigned int ret_freq = 0;
1837 down_read(&policy->rwsem);
1838 if (cpufreq_driver->get)
1839 ret_freq = __cpufreq_get(policy);
1840 up_read(&policy->rwsem);
1842 cpufreq_cpu_put(policy);
1847 EXPORT_SYMBOL(cpufreq_get);
1849 static struct subsys_interface cpufreq_interface = {
1851 .subsys = &cpu_subsys,
1852 .add_dev = cpufreq_add_dev,
1853 .remove_dev = cpufreq_remove_dev,
1857 * In case the platform wants some specific frequency to be configured during suspend.
1860 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1864 if (!policy->suspend_freq) {
1865 pr_debug("%s: suspend_freq not defined\n", __func__);
1869 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1870 policy->suspend_freq);
1872 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1873 CPUFREQ_RELATION_H);
1875 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1876 __func__, policy->suspend_freq, ret);
1880 EXPORT_SYMBOL(cpufreq_generic_suspend);
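/*
 * Usage sketch (editorial example, hypothetical driver): a driver opts into
 * the generic suspend handling by choosing a safe frequency at init time and
 * pointing its ->suspend callback at this helper:
 *
 *	// In the driver's ->init():
 *	policy->suspend_freq = policy->freq_table[0].frequency;
 *
 *	// In the struct cpufreq_driver definition:
 *	.suspend = cpufreq_generic_suspend,
 */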
1883 * cpufreq_suspend() - Suspend CPUFreq governors.
1885 * Called during system-wide suspend/hibernate cycles to suspend the governors,
1886 * as some platforms can't change the CPU frequency after this point in the
1887 * suspend cycle, because the devices (e.g. I2C controllers, regulators) they
1888 * use for changing the frequency are suspended quickly after it.
1890 void cpufreq_suspend(void)
1892 struct cpufreq_policy *policy;
1894 if (!cpufreq_driver)
1897 if (!has_target() && !cpufreq_driver->suspend)
1900 pr_debug("%s: Suspending Governors\n", __func__);
1902 for_each_active_policy(policy) {
1904 down_write(&policy->rwsem);
1905 cpufreq_stop_governor(policy);
1906 up_write(&policy->rwsem);
1909 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1910 pr_err("%s: Failed to suspend driver: %s\n", __func__,
1911 cpufreq_driver->name);
1915 cpufreq_suspended = true;
1919 * cpufreq_resume() - Resume CPUFreq governors.
1921 * Called during system-wide suspend/hibernate cycles to resume the governors
1922 * that were suspended by cpufreq_suspend().
1924 void cpufreq_resume(void)
1926 struct cpufreq_policy *policy;
1929 if (!cpufreq_driver)
1932 if (unlikely(!cpufreq_suspended))
1935 cpufreq_suspended = false;
1937 if (!has_target() && !cpufreq_driver->resume)
1940 pr_debug("%s: Resuming Governors\n", __func__);
1942 for_each_active_policy(policy) {
1943 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1944 pr_err("%s: Failed to resume driver: %p\n", __func__,
1946 } else if (has_target()) {
1947 down_write(&policy->rwsem);
1948 ret = cpufreq_start_governor(policy);
1949 up_write(&policy->rwsem);
1952 pr_err("%s: Failed to start governor for policy: %p\n",
1959 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
1960 * @flags: Flags to test against the current cpufreq driver's flags.
1962 * Assumes that the driver is there, so callers must ensure that this is the case.
1965 bool cpufreq_driver_test_flags(u16 flags)
1967 return !!(cpufreq_driver->flags & flags);
1971 * cpufreq_get_current_driver - Return the current driver's name.
1973 * Return the name string of the currently registered cpufreq driver or NULL if none is registered.
1976 const char *cpufreq_get_current_driver(void)
1979 return cpufreq_driver->name;
1983 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1986 * cpufreq_get_driver_data - Return current driver data.
1988 * Return the private data of the currently registered cpufreq driver, or NULL
1989 * if no cpufreq driver has been registered.
1991 void *cpufreq_get_driver_data(void)
1994 return cpufreq_driver->driver_data;
1998 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
2000 /*********************************************************************
2001 * NOTIFIER LISTS INTERFACE *
2002 *********************************************************************/
2005 * cpufreq_register_notifier - Register a notifier with cpufreq.
2006 * @nb: notifier function to register.
2007 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2009 * Add a notifier to one of two lists: either a list of notifiers that run on
2010 * clock rate changes (once before and once after every transition), or a list
2011 * of notifiers that run on cpufreq policy changes.
2013 * This function may sleep and it has the same return values as
2014 * blocking_notifier_chain_register().
2016 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
2020 if (cpufreq_disabled())
2024 case CPUFREQ_TRANSITION_NOTIFIER:
2025 mutex_lock(&cpufreq_fast_switch_lock);
2027 if (cpufreq_fast_switch_count > 0) {
2028 mutex_unlock(&cpufreq_fast_switch_lock);
2031 ret = srcu_notifier_chain_register(
2032 &cpufreq_transition_notifier_list, nb);
2034 cpufreq_fast_switch_count--;
2036 mutex_unlock(&cpufreq_fast_switch_lock);
2038 case CPUFREQ_POLICY_NOTIFIER:
2039 ret = blocking_notifier_chain_register(
2040 &cpufreq_policy_notifier_list, nb);
2048 EXPORT_SYMBOL(cpufreq_register_notifier);
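/*
 * Usage sketch (editorial example): a transition notifier receives a
 * struct cpufreq_freqs pointer and runs once before and once after every
 * frequency change; "foo_freq_notify" is a hypothetical callback.
 *
 *	static int foo_freq_notify(struct notifier_block *nb,
 *				   unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("CPU%u now runs at %u kHz\n",
 *				 freqs->policy->cpu, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_freq_nb = {
 *		.notifier_call = foo_freq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&foo_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */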
2051 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2052 * @nb: notifier block to be unregistered.
2053 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2055 * Remove a notifier from one of the cpufreq notifier lists.
2057 * This function may sleep and it has the same return values as
2058 * blocking_notifier_chain_unregister().
2060 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2064 if (cpufreq_disabled())
2068 case CPUFREQ_TRANSITION_NOTIFIER:
2069 mutex_lock(&cpufreq_fast_switch_lock);
2071 ret = srcu_notifier_chain_unregister(
2072 &cpufreq_transition_notifier_list, nb);
2073 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2074 cpufreq_fast_switch_count++;
2076 mutex_unlock(&cpufreq_fast_switch_lock);
2078 case CPUFREQ_POLICY_NOTIFIER:
2079 ret = blocking_notifier_chain_unregister(
2080 &cpufreq_policy_notifier_list, nb);
2088 EXPORT_SYMBOL(cpufreq_unregister_notifier);
2091 /*********************************************************************
2093 *********************************************************************/
2096 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2097 * @policy: cpufreq policy to switch the frequency for.
2098 * @target_freq: New frequency to set (may be approximate).
2100 * Carry out a fast frequency switch without sleeping.
2102 * The driver's ->fast_switch() callback invoked by this function must be
2103 * suitable for being called from within RCU-sched read-side critical sections
2104 * and it is expected to select the minimum available frequency greater than or
2105 * equal to @target_freq (CPUFREQ_RELATION_L).
2107 * This function must not be called if policy->fast_switch_enabled is unset.
2109 * Governors calling this function must guarantee that it will never be invoked
2110 * twice in parallel for the same policy and that it will never be called in
2111 * parallel with either ->target() or ->target_index() for the same policy.
2113 * Returns the actual frequency set for the CPU.
2115 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2116 * error condition, the hardware configuration must be preserved.
2118 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2119 unsigned int target_freq)
2124 target_freq = clamp_val(target_freq, policy->min, policy->max);
2125 freq = cpufreq_driver->fast_switch(policy, target_freq);
2131 arch_set_freq_scale(policy->related_cpus, freq,
2132 policy->cpuinfo.max_freq);
2133 cpufreq_stats_record_transition(policy, freq);
2135 if (trace_cpu_frequency_enabled()) {
2136 for_each_cpu(cpu, policy->cpus)
2137 trace_cpu_frequency(freq, cpu);
2142 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
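/*
 * Usage sketch (editorial example): the schedutil-style fast path runs in
 * scheduler context, so a governor only takes it when fast switching has been
 * enabled for the policy; a return value of 0 means the hardware state was
 * left unchanged.
 *
 *	if (policy->fast_switch_enabled) {
 *		freq = cpufreq_driver_fast_switch(policy, next_freq);
 *		if (!freq)
 *			return;
 *	}
 */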
2145 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2147 * @min_perf: Minimum (required) performance level (units of @capacity).
2148 * @target_perf: Target (desired) performance level (units of @capacity).
2149 * @capacity: Capacity of the target CPU.
2151 * Carry out a fast performance level switch of @cpu without sleeping.
2153 * The driver's ->adjust_perf() callback invoked by this function must be
2154 * suitable for being called from within RCU-sched read-side critical sections
2155 * and it is expected to select a suitable performance level equal to or above
2156 * @min_perf and preferably equal to or below @target_perf.
2158 * This function must not be called if policy->fast_switch_enabled is unset.
2160 * Governors calling this function must guarantee that it will never be invoked
2161 * twice in parallel for the same CPU and that it will never be called in
2162 * parallel with either ->target() or ->target_index() or ->fast_switch() for
2165 void cpufreq_driver_adjust_perf(unsigned int cpu,
2166 unsigned long min_perf,
2167 unsigned long target_perf,
2168 unsigned long capacity)
2170 cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2174 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2176 * Return 'true' if the ->adjust_perf callback is present for the
2177 * current driver or 'false' otherwise.
2179 bool cpufreq_driver_has_adjust_perf(void)
2181 return !!cpufreq_driver->adjust_perf;
2184 /* Must set freqs->new to intermediate frequency */
2185 static int __target_intermediate(struct cpufreq_policy *policy,
2186 struct cpufreq_freqs *freqs, int index)
2190 freqs->new = cpufreq_driver->get_intermediate(policy, index);
2192 /* We don't need to switch to intermediate freq */
2196 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2197 __func__, policy->cpu, freqs->old, freqs->new);
2199 cpufreq_freq_transition_begin(policy, freqs);
2200 ret = cpufreq_driver->target_intermediate(policy, index);
2201 cpufreq_freq_transition_end(policy, freqs, ret);
2204 pr_err("%s: Failed to change to intermediate frequency: %d\n",
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int restore_freq, intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	restore_freq = policy->cur;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	target_freq = __resolve_freq(policy, target_freq, relation);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	if (cpufreq_driver->target) {
		/*
		 * If the driver hasn't setup a single inefficient frequency,
		 * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
		 */
		if (!policy->efficiencies_available)
			relation &= ~CPUFREQ_RELATION_E;

		return cpufreq_driver->target(policy, target_freq, relation);
	}

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	return __target_index(policy, policy->cached_resolved_idx);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
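
/*
 * Illustrative sketch (compiled out): callers in process context that may
 * sleep use cpufreq_driver_target(), which takes policy->rwsem around
 * __cpufreq_driver_target(). The helper below is hypothetical.
 */
#if 0
static int example_raise_to_at_least(struct cpufreq_policy *policy,
				     unsigned int freq_khz)
{
	/* CPUFREQ_RELATION_L: lowest table frequency at or above the target. */
	return cpufreq_driver_target(policy, freq_khz, CPUFREQ_RELATION_L);
}
#endif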
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}
static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}
void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
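
/*
 * Illustrative sketch (compiled out): a governor module registers itself on
 * load and unregisters on removal. The "example" governor and its ->limits()
 * callback are hypothetical.
 */
#if 0
static void example_gov_limits(struct cpufreq_policy *policy)
{
	/* Re-apply the new limits, e.g. by clamping to the updated maximum. */
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.limits	= example_gov_limits,
};

static int __init example_gov_register(void)
{
	return cpufreq_register_governor(&example_governor);
}

static void __exit example_gov_unregister(void)
{
	cpufreq_unregister_governor(&example_governor);
}
module_init(example_gov_register);
module_exit(example_gov_unregister);
#endif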
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
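
/*
 * Illustrative sketch (compiled out): cpufreq_get_policy() copies the policy
 * into a caller-provided buffer, so the caller can inspect the limits without
 * holding a reference afterwards. Hypothetical helper; note that struct
 * cpufreq_policy is fairly large for an on-stack copy.
 */
#if 0
static unsigned int example_read_policy_max(unsigned int cpu)
{
	struct cpufreq_policy policy;

	if (cpufreq_get_policy(&policy, cpu))
		return 0;

	return policy.max;	/* kHz */
}
#endif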
/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present) or
 * carry out a governor update for @policy. That is, run the current governor's
 * ->limits() callback (if @new_gov points to the same object as the one in
 * @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * The PM QoS framework collects all the requests from users and
	 * provides us with the final aggregated value here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and make
	 * sure that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	/*
	 * Resolve policy min/max to available frequencies. This ensures that
	 * the resolved frequencies neither overshoot the requested maximum nor
	 * undershoot the requested minimum.
	 */
	policy->min = new_data.min;
	policy->max = new_data.max;
	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
 * for the policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
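
/*
 * Illustrative sketch (compiled out): platform code that learns about a
 * firmware-imposed limit change (for example an ACPI notification) calls
 * cpufreq_update_limits(), which either lets the driver handle it directly
 * via ->update_limits() or falls back to a full policy re-evaluation.
 * The handler name is hypothetical.
 */
#if 0
static void example_platform_limits_notify(unsigned int cpu)
{
	cpufreq_update_limits(cpu);
}
#endif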
/*********************************************************************
 *                               BOOST                               *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}
int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_lock();
	for_each_active_policy(policy) {
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret)
			goto err_reset_state;
	}
	cpus_read_unlock();

	return 0;

err_reset_state:
	cpus_read_unlock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
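
/*
 * Illustrative sketch (compiled out): a driver whose frequency table carries
 * boost entries but which has no dedicated hardware boost control can opt in
 * to the software fallback (cpufreq_boost_set_sw) from its ->init() callback.
 * The "example_*" names are hypothetical.
 */
#if 0
static int example_drv_init(struct cpufreq_policy *policy)
{
	int ret;

	/* ... set up policy->freq_table, including CPUFREQ_BOOST_FREQ rows ... */

	ret = cpufreq_enable_boost_support();
	if (ret)
		return ret;

	return 0;
}
#endif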
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structures, so make sure they are available before proceeding
	 * further.
	 */
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Mark support for the scheduler's frequency invariance engine for
	 * drivers that implement target(), target_index() or fast_switch().
	 */
	if (!cpufreq_driver->setpolicy) {
		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
		pr_debug("supports frequency invariance");
	}

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (unlikely(list_empty(&cpufreq_policy_list))) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
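
/*
 * Illustrative sketch (compiled out): the minimal shape of a driver that
 * passes the validation above - ->init(), ->verify() and exactly one of
 * ->setpolicy() or ->target()/->target_index(). All "example_*" symbols are
 * hypothetical; the generic helpers referenced are declared in
 * <linux/cpufreq.h>.
 */
#if 0
static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* Program the hardware for policy->freq_table[index].frequency. */
	return 0;
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* Provide policy->freq_table and the initial limits here. */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= example_cpu_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.attr		= cpufreq_generic_attr,
};

static int __init example_cpufreq_register(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
#endif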
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent CPU hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	struct cpufreq_governor *gov = cpufreq_default_governor();

	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	if (!strlen(default_governor))
		strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

	return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);