1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/drivers/cpufreq/cpufreq.c
5 * Copyright (C) 2001 Russell King
6 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
9 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
10 * Added handling for CPU hotplug
11 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
12 * Fix handling for CPU hotplug -- affected CPUs
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/cpu.h>
18 #include <linux/cpufreq.h>
19 #include <linux/cpu_cooling.h>
20 #include <linux/delay.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/pm_qos.h>
27 #include <linux/slab.h>
28 #include <linux/suspend.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <linux/units.h>
32 #include <trace/events/power.h>
34 static LIST_HEAD(cpufreq_policy_list);
36 /* Macros to iterate over CPU policies */
37 #define for_each_suitable_policy(__policy, __active) \
38 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
39 if ((__active) == !policy_is_inactive(__policy))
41 #define for_each_active_policy(__policy) \
42 for_each_suitable_policy(__policy, true)
43 #define for_each_inactive_policy(__policy) \
44 for_each_suitable_policy(__policy, false)
46 /* Iterate over governors */
47 static LIST_HEAD(cpufreq_governor_list);
48 #define for_each_governor(__governor) \
49 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
51 static char default_governor[CPUFREQ_NAME_LEN];
54 * The "cpufreq driver" - the arch- or hardware-dependent low
55 * level driver of CPUFreq support, and its spinlock. This lock
56 * also protects the cpufreq_cpu_data array.
58 static struct cpufreq_driver *cpufreq_driver;
59 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
60 static DEFINE_RWLOCK(cpufreq_driver_lock);
62 static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
63 bool cpufreq_supports_freq_invariance(void)
65 return static_branch_likely(&cpufreq_freq_invariance);
68 /* Flag to suspend/resume CPUFreq governors */
69 static bool cpufreq_suspended;
71 static inline bool has_target(void)
73 return cpufreq_driver->target_index || cpufreq_driver->target;
76 bool has_target_index(void)
78 return !!cpufreq_driver->target_index;
81 /* internal prototypes */
82 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
83 static int cpufreq_init_governor(struct cpufreq_policy *policy);
84 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
85 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
86 static int cpufreq_set_policy(struct cpufreq_policy *policy,
87 struct cpufreq_governor *new_gov,
88 unsigned int new_pol);
89 static bool cpufreq_boost_supported(void);
92 * Two notifier lists: the "policy" list is involved in the
93 * validation process for a new CPU frequency policy; the
94 * "transition" list for kernel code that needs to handle
95 * changes to devices when the CPU clock speed changes.
96 * The mutex locks both lists.
98 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
99 SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
101 static int off __read_mostly;
102 static int cpufreq_disabled(void)
106 void disable_cpufreq(void)
110 static DEFINE_MUTEX(cpufreq_governor_mutex);
112 bool have_governor_per_policy(void)
114 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
116 EXPORT_SYMBOL_GPL(have_governor_per_policy);
118 static struct kobject *cpufreq_global_kobject;
120 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
122 if (have_governor_per_policy())
123 return &policy->kobj;
125 return cpufreq_global_kobject;
127 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
129 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
131 struct kernel_cpustat kcpustat;
136 cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
138 kcpustat_cpu_fetch(&kcpustat, cpu);
140 busy_time = kcpustat.cpustat[CPUTIME_USER];
141 busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
142 busy_time += kcpustat.cpustat[CPUTIME_IRQ];
143 busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
144 busy_time += kcpustat.cpustat[CPUTIME_STEAL];
145 busy_time += kcpustat.cpustat[CPUTIME_NICE];
147 idle_time = cur_wall_time - busy_time;
149 *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
151 return div_u64(idle_time, NSEC_PER_USEC);
154 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
156 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
158 if (idle_time == -1ULL)
159 return get_cpu_idle_time_jiffy(cpu, wall);
161 idle_time += get_cpu_iowait_time_us(cpu, wall);
165 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
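/*
 * Illustrative sketch only (not part of this file): a governor-style busy
 * percentage computed on top of get_cpu_idle_time().  The prev_* variables,
 * io_is_busy and the sampling scheme are hypothetical.
 *
 *	u64 wall, idle, wall_delta, idle_delta;
 *	unsigned int load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, io_is_busy);
 *	wall_delta = wall - prev_wall;
 *	idle_delta = idle - prev_idle;
 *	load = wall_delta ? 100 * (wall_delta - idle_delta) / wall_delta : 0;
 *
 * Both return values are in microseconds, so the deltas are directly
 * comparable.
 */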
168 * This is a generic cpufreq init() routine which can be used by cpufreq
169 * drivers of SMP systems. It will do the following:
170 * - validate & show the frequency table passed in
171 * - set the policy's transition latency
172 * - fill policy->cpus with all possible CPUs
174 void cpufreq_generic_init(struct cpufreq_policy *policy,
175 struct cpufreq_frequency_table *table,
176 unsigned int transition_latency)
178 policy->freq_table = table;
179 policy->cpuinfo.transition_latency = transition_latency;
182 * The driver only supports the SMP configuration where all processors
183 * share the clock and voltage.
185 cpumask_setall(policy->cpus);
187 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
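/*
 * Minimal usage sketch, assuming a platform driver with a static frequency
 * table and a 300 us transition latency (the foo_* names are hypothetical):
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, foo_freq_table,
 *				     300 * NSEC_PER_USEC);
 *		return 0;
 *	}
 *
 * Note that the transition latency is given in nanoseconds.
 */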
189 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
191 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
193 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
195 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
197 unsigned int cpufreq_generic_get(unsigned int cpu)
199 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
201 if (!policy || IS_ERR(policy->clk)) {
202 pr_err("%s: No %s associated to cpu: %d\n",
203 __func__, policy ? "clk" : "policy", cpu);
207 return clk_get_rate(policy->clk) / 1000;
209 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
212 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
213 * @cpu: CPU to find the policy for.
215 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
216 * the kobject reference counter of that policy. Return a valid policy on
217 * success or NULL on failure.
219 * The policy returned by this function has to be released with the help of
220 * cpufreq_cpu_put() to balance its kobject reference counter properly.
222 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
224 struct cpufreq_policy *policy = NULL;
227 if (WARN_ON(cpu >= nr_cpu_ids))
230 /* get the cpufreq driver */
231 read_lock_irqsave(&cpufreq_driver_lock, flags);
233 if (cpufreq_driver) {
235 policy = cpufreq_cpu_get_raw(cpu);
237 kobject_get(&policy->kobj);
240 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
244 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
247 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
248 * @policy: cpufreq policy returned by cpufreq_cpu_get().
250 void cpufreq_cpu_put(struct cpufreq_policy *policy)
252 kobject_put(&policy->kobj);
254 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
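/*
 * Canonical usage pattern for the two helpers above (sketch only):
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		... read policy fields ...
 *		cpufreq_cpu_put(policy);
 *	}
 *
 * Every successful cpufreq_cpu_get() must be balanced by a cpufreq_cpu_put(),
 * otherwise the policy kobject can never be released.
 */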
257 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
258 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
260 void cpufreq_cpu_release(struct cpufreq_policy *policy)
262 if (WARN_ON(!policy))
265 lockdep_assert_held(&policy->rwsem);
267 up_write(&policy->rwsem);
269 cpufreq_cpu_put(policy);
273 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
274 * @cpu: CPU to find the policy for.
276 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
277 * if the policy returned by it is not NULL, acquire its rwsem for writing.
278 * Return the policy if it is active or release it and return NULL otherwise.
280 * The policy returned by this function has to be released with the help of
281 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
282 * counter properly.
284 struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
286 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
291 down_write(&policy->rwsem);
293 if (policy_is_inactive(policy)) {
294 cpufreq_cpu_release(policy);
301 /*********************************************************************
302 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
303 *********************************************************************/
306 * adjust_jiffies - Adjust the system "loops_per_jiffy".
307 * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
308 * @ci: Frequency change information.
310 * This function alters the system "loops_per_jiffy" for the clock
311 * speed change. Note that loops_per_jiffy cannot be updated on SMP
312 * systems as each CPU might be scaled differently. So, use the arch
313 * per-CPU loops_per_jiffy value wherever possible.
315 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
318 static unsigned long l_p_j_ref;
319 static unsigned int l_p_j_ref_freq;
321 if (ci->flags & CPUFREQ_CONST_LOOPS)
324 if (!l_p_j_ref_freq) {
325 l_p_j_ref = loops_per_jiffy;
326 l_p_j_ref_freq = ci->old;
327 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
328 l_p_j_ref, l_p_j_ref_freq);
330 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
331 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
333 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
334 loops_per_jiffy, ci->new);
340 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
341 * @policy: cpufreq policy the transition applies to.
342 * @freqs: details of the frequency update.
343 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
345 * This function calls the transition notifiers and adjust_jiffies().
347 * It is called twice on all CPU frequency changes that have external effects.
349 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
350 struct cpufreq_freqs *freqs,
355 BUG_ON(irqs_disabled());
357 if (cpufreq_disabled())
360 freqs->policy = policy;
361 freqs->flags = cpufreq_driver->flags;
362 pr_debug("notification %u of frequency transition to %u kHz\n",
366 case CPUFREQ_PRECHANGE:
368 * Detect if the driver reported a value as "old frequency"
369 * which is not equal to what the cpufreq core thinks is
370 * "old frequency".
372 if (policy->cur && policy->cur != freqs->old) {
373 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
374 freqs->old, policy->cur);
375 freqs->old = policy->cur;
378 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
379 CPUFREQ_PRECHANGE, freqs);
381 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
384 case CPUFREQ_POSTCHANGE:
385 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
386 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
387 cpumask_pr_args(policy->cpus));
389 for_each_cpu(cpu, policy->cpus)
390 trace_cpu_frequency(freqs->new, cpu);
392 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
393 CPUFREQ_POSTCHANGE, freqs);
395 cpufreq_stats_record_transition(policy, freqs->new);
396 policy->cur = freqs->new;
400 /* Do post notifications when there is a chance that the transition has failed */
401 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
402 struct cpufreq_freqs *freqs, int transition_failed)
404 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
405 if (!transition_failed)
408 swap(freqs->old, freqs->new);
409 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
410 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
413 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
414 struct cpufreq_freqs *freqs)
418 * Catch double invocations of _begin() which lead to self-deadlock.
419 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
420 * doesn't invoke _begin() on their behalf, and hence the chances of
421 * double invocations are very low. Moreover, there are scenarios
422 * where these checks can emit false-positive warnings in these
423 * drivers; so we avoid that by skipping them altogether.
425 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
426 && current == policy->transition_task);
429 wait_event(policy->transition_wait, !policy->transition_ongoing);
431 spin_lock(&policy->transition_lock);
433 if (unlikely(policy->transition_ongoing)) {
434 spin_unlock(&policy->transition_lock);
438 policy->transition_ongoing = true;
439 policy->transition_task = current;
441 spin_unlock(&policy->transition_lock);
443 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
445 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
447 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
448 struct cpufreq_freqs *freqs, int transition_failed)
450 if (WARN_ON(!policy->transition_ongoing))
453 cpufreq_notify_post_transition(policy, freqs, transition_failed);
455 arch_set_freq_scale(policy->related_cpus,
457 arch_scale_freq_ref(policy->cpu));
459 spin_lock(&policy->transition_lock);
460 policy->transition_ongoing = false;
461 policy->transition_task = NULL;
462 spin_unlock(&policy->transition_lock);
464 wake_up(&policy->transition_wait);
466 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
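/*
 * Sketch of how an ASYNC_NOTIFICATION driver is expected to wrap its own
 * frequency change with the two helpers above (foo_write_frequency() is
 * hypothetical):
 *
 *	struct cpufreq_freqs freqs = {
 *		.old = policy->cur,
 *		.new = policy->freq_table[index].frequency,
 *	};
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = foo_write_frequency(freqs.new);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 * For ordinary ->target_index() drivers the core issues these calls itself,
 * see __target_index() below.
 */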
469 * Fast frequency switching status count. Positive means "enabled", negative
470 * means "disabled" and 0 means "not decided yet".
472 static int cpufreq_fast_switch_count;
473 static DEFINE_MUTEX(cpufreq_fast_switch_lock);
475 static void cpufreq_list_transition_notifiers(void)
477 struct notifier_block *nb;
479 pr_info("Registered transition notifiers:\n");
481 mutex_lock(&cpufreq_transition_notifier_list.mutex);
483 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
484 pr_info("%pS\n", nb->notifier_call);
486 mutex_unlock(&cpufreq_transition_notifier_list.mutex);
490 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
491 * @policy: cpufreq policy to enable fast frequency switching for.
493 * Try to enable fast frequency switching for @policy.
495 * The attempt will fail if there is at least one transition notifier registered
496 * at this point, as fast frequency switching is quite fundamentally at odds
497 * with transition notifiers. Thus if successful, it will make registration of
498 * transition notifiers fail going forward.
500 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
502 lockdep_assert_held(&policy->rwsem);
504 if (!policy->fast_switch_possible)
507 mutex_lock(&cpufreq_fast_switch_lock);
508 if (cpufreq_fast_switch_count >= 0) {
509 cpufreq_fast_switch_count++;
510 policy->fast_switch_enabled = true;
512 pr_warn("CPU%u: Fast frequency switching not enabled\n",
514 cpufreq_list_transition_notifiers();
516 mutex_unlock(&cpufreq_fast_switch_lock);
518 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
521 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
522 * @policy: cpufreq policy to disable fast frequency switching for.
524 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
526 mutex_lock(&cpufreq_fast_switch_lock);
527 if (policy->fast_switch_enabled) {
528 policy->fast_switch_enabled = false;
529 if (!WARN_ON(cpufreq_fast_switch_count <= 0))
530 cpufreq_fast_switch_count--;
532 mutex_unlock(&cpufreq_fast_switch_lock);
534 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
536 static unsigned int __resolve_freq(struct cpufreq_policy *policy,
537 unsigned int target_freq, unsigned int relation)
541 target_freq = clamp_val(target_freq, policy->min, policy->max);
543 if (!policy->freq_table)
546 idx = cpufreq_frequency_table_target(policy, target_freq, relation);
547 policy->cached_resolved_idx = idx;
548 policy->cached_target_freq = target_freq;
549 return policy->freq_table[idx].frequency;
553 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
554 * frequency.
555 * @policy: associated policy to interrogate
556 * @target_freq: target frequency to resolve.
558 * The target to driver frequency mapping is cached in the policy.
560 * Return: Lowest driver-supported frequency greater than or equal to the
561 * given target_freq, subject to policy (min/max) and driver limitations.
563 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
564 unsigned int target_freq)
566 return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
568 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
570 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
572 unsigned int latency;
574 if (policy->transition_delay_us)
575 return policy->transition_delay_us;
577 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
580 * For platforms that can change the frequency very fast (< 10
581 * us), the above formula gives a decent transition delay. But
582 * for platforms where transition_latency is in milliseconds, it
583 * ends up giving unrealistic values.
585 * Cap the default transition delay to 10 ms, which seems to be
586 * a reasonable amount of time after which we should reevaluate
589 return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
592 return LATENCY_MULTIPLIER;
594 EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
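/*
 * Worked example for the cap above: a platform reporting a 500 us transition
 * latency gets 500 * LATENCY_MULTIPLIER (1000) == 500000 us from the
 * multiplication alone, which the min() caps at 10000 us (10 ms).  A 5 us
 * latency yields 5000 us and is left uncapped.
 */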
596 /*********************************************************************
597 *                          SYSFS INTERFACE                          *
598 *********************************************************************/
599 static ssize_t show_boost(struct kobject *kobj,
600 struct kobj_attribute *attr, char *buf)
602 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
605 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
606 const char *buf, size_t count)
610 ret = sscanf(buf, "%d", &enable);
611 if (ret != 1 || enable < 0 || enable > 1)
614 if (cpufreq_boost_trigger_state(enable)) {
615 pr_err("%s: Cannot %s BOOST!\n",
616 __func__, enable ? "enable" : "disable");
620 pr_debug("%s: cpufreq BOOST %s\n",
621 __func__, enable ? "enabled" : "disabled");
625 define_one_global_rw(boost);
627 static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
629 return sysfs_emit(buf, "%d\n", policy->boost_enabled);
632 static ssize_t store_local_boost(struct cpufreq_policy *policy,
633 const char *buf, size_t count)
637 ret = kstrtoint(buf, 10, &enable);
638 if (ret || enable < 0 || enable > 1)
641 if (!cpufreq_driver->boost_enabled)
644 if (policy->boost_enabled == enable)
647 policy->boost_enabled = enable;
650 ret = cpufreq_driver->set_boost(policy, enable);
654 policy->boost_enabled = !policy->boost_enabled;
661 static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
663 static struct cpufreq_governor *find_governor(const char *str_governor)
665 struct cpufreq_governor *t;
668 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
674 static struct cpufreq_governor *get_governor(const char *str_governor)
676 struct cpufreq_governor *t;
678 mutex_lock(&cpufreq_governor_mutex);
679 t = find_governor(str_governor);
683 if (!try_module_get(t->owner))
687 mutex_unlock(&cpufreq_governor_mutex);
692 static unsigned int cpufreq_parse_policy(char *str_governor)
694 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
695 return CPUFREQ_POLICY_PERFORMANCE;
697 if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
698 return CPUFREQ_POLICY_POWERSAVE;
700 return CPUFREQ_POLICY_UNKNOWN;
704 * cpufreq_parse_governor - parse a governor string only for has_target()
705 * @str_governor: Governor name.
707 static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
709 struct cpufreq_governor *t;
711 t = get_governor(str_governor);
715 if (request_module("cpufreq_%s", str_governor))
718 return get_governor(str_governor);
722 * cpufreq_per_cpu_attr_read() / show_##file_name() -
723 * print out cpufreq information
725 * Write out information from cpufreq_driver->policy[cpu]; object must be
726 * "unsigned int".
729 #define show_one(file_name, object) \
730 static ssize_t show_##file_name \
731 (struct cpufreq_policy *policy, char *buf) \
733 return sprintf(buf, "%u\n", policy->object); \
736 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
737 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
738 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
739 show_one(scaling_min_freq, min);
740 show_one(scaling_max_freq, max);
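/*
 * For reference, show_one(scaling_min_freq, min) above expands to roughly:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 *
 * i.e. one read-only sysfs show() helper per exported attribute.
 */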
742 __weak unsigned int arch_freq_get_on_cpu(int cpu)
747 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
752 freq = arch_freq_get_on_cpu(policy->cpu);
754 ret = sprintf(buf, "%u\n", freq);
755 else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
756 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
758 ret = sprintf(buf, "%u\n", policy->cur);
763 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
765 #define store_one(file_name, object) \
766 static ssize_t store_##file_name \
767 (struct cpufreq_policy *policy, const char *buf, size_t count) \
772 ret = kstrtoul(buf, 0, &val); \
776 ret = freq_qos_update_request(policy->object##_freq_req, val);\
777 return ret >= 0 ? count : ret; \
780 store_one(scaling_min_freq, min);
781 store_one(scaling_max_freq, max);
784 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
786 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
789 unsigned int cur_freq = __cpufreq_get(policy);
792 return sprintf(buf, "%u\n", cur_freq);
794 return sprintf(buf, "<unknown>\n");
798 * show_scaling_governor - show the current policy for the specified CPU
800 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
802 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
803 return sprintf(buf, "powersave\n");
804 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
805 return sprintf(buf, "performance\n");
806 else if (policy->governor)
807 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
808 policy->governor->name);
813 * store_scaling_governor - store policy for the specified CPU
815 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
816 const char *buf, size_t count)
818 char str_governor[16];
821 ret = sscanf(buf, "%15s", str_governor);
825 if (cpufreq_driver->setpolicy) {
826 unsigned int new_pol;
828 new_pol = cpufreq_parse_policy(str_governor);
832 ret = cpufreq_set_policy(policy, NULL, new_pol);
834 struct cpufreq_governor *new_gov;
836 new_gov = cpufreq_parse_governor(str_governor);
840 ret = cpufreq_set_policy(policy, new_gov,
841 CPUFREQ_POLICY_UNKNOWN);
843 module_put(new_gov->owner);
846 return ret ? ret : count;
850 * show_scaling_driver - show the cpufreq driver currently loaded
852 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
854 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
858 * show_scaling_available_governors - show the available CPUfreq governors
860 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
864 struct cpufreq_governor *t;
867 i += sprintf(buf, "performance powersave");
871 mutex_lock(&cpufreq_governor_mutex);
872 for_each_governor(t) {
873 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
874 - (CPUFREQ_NAME_LEN + 2)))
876 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
878 mutex_unlock(&cpufreq_governor_mutex);
880 i += sprintf(&buf[i], "\n");
884 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
889 for_each_cpu(cpu, mask) {
890 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
891 if (i >= (PAGE_SIZE - 5))
895 /* Remove the extra space at the end */
898 i += sprintf(&buf[i], "\n");
901 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
904 * show_related_cpus - show the CPUs affected by each transition even if
905 * hw coordination is in use
907 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
909 return cpufreq_show_cpus(policy->related_cpus, buf);
913 * show_affected_cpus - show the CPUs affected by each transition
915 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
917 return cpufreq_show_cpus(policy->cpus, buf);
920 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
921 const char *buf, size_t count)
923 unsigned int freq = 0;
926 if (!policy->governor || !policy->governor->store_setspeed)
929 ret = sscanf(buf, "%u", &freq);
933 policy->governor->store_setspeed(policy, freq);
938 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
940 if (!policy->governor || !policy->governor->show_setspeed)
941 return sprintf(buf, "<unsupported>\n");
943 return policy->governor->show_setspeed(policy, buf);
947 * show_bios_limit - show the current cpufreq HW/BIOS limitation
949 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
953 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
955 return sprintf(buf, "%u\n", limit);
956 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
959 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
960 cpufreq_freq_attr_ro(cpuinfo_min_freq);
961 cpufreq_freq_attr_ro(cpuinfo_max_freq);
962 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
963 cpufreq_freq_attr_ro(scaling_available_governors);
964 cpufreq_freq_attr_ro(scaling_driver);
965 cpufreq_freq_attr_ro(scaling_cur_freq);
966 cpufreq_freq_attr_ro(bios_limit);
967 cpufreq_freq_attr_ro(related_cpus);
968 cpufreq_freq_attr_ro(affected_cpus);
969 cpufreq_freq_attr_rw(scaling_min_freq);
970 cpufreq_freq_attr_rw(scaling_max_freq);
971 cpufreq_freq_attr_rw(scaling_governor);
972 cpufreq_freq_attr_rw(scaling_setspeed);
974 static struct attribute *cpufreq_attrs[] = {
975 &cpuinfo_min_freq.attr,
976 &cpuinfo_max_freq.attr,
977 &cpuinfo_transition_latency.attr,
978 &scaling_min_freq.attr,
979 &scaling_max_freq.attr,
982 &scaling_governor.attr,
983 &scaling_driver.attr,
984 &scaling_available_governors.attr,
985 &scaling_setspeed.attr,
988 ATTRIBUTE_GROUPS(cpufreq);
990 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
991 #define to_attr(a) container_of(a, struct freq_attr, attr)
993 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
995 struct cpufreq_policy *policy = to_policy(kobj);
996 struct freq_attr *fattr = to_attr(attr);
997 ssize_t ret = -EBUSY;
1002 down_read(&policy->rwsem);
1003 if (likely(!policy_is_inactive(policy)))
1004 ret = fattr->show(policy, buf);
1005 up_read(&policy->rwsem);
1010 static ssize_t store(struct kobject *kobj, struct attribute *attr,
1011 const char *buf, size_t count)
1013 struct cpufreq_policy *policy = to_policy(kobj);
1014 struct freq_attr *fattr = to_attr(attr);
1015 ssize_t ret = -EBUSY;
1020 down_write(&policy->rwsem);
1021 if (likely(!policy_is_inactive(policy)))
1022 ret = fattr->store(policy, buf, count);
1023 up_write(&policy->rwsem);
1028 static void cpufreq_sysfs_release(struct kobject *kobj)
1030 struct cpufreq_policy *policy = to_policy(kobj);
1031 pr_debug("last reference is dropped\n");
1032 complete(&policy->kobj_unregister);
1035 static const struct sysfs_ops sysfs_ops = {
1040 static const struct kobj_type ktype_cpufreq = {
1041 .sysfs_ops = &sysfs_ops,
1042 .default_groups = cpufreq_groups,
1043 .release = cpufreq_sysfs_release,
1046 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1052 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1055 dev_dbg(dev, "%s: Adding symlink\n", __func__);
1056 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1057 dev_err(dev, "cpufreq symlink creation failed\n");
1060 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
1063 dev_dbg(dev, "%s: Removing symlink\n", __func__);
1064 sysfs_remove_link(&dev->kobj, "cpufreq");
1065 cpumask_clear_cpu(cpu, policy->real_cpus);
1068 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1070 struct freq_attr **drv_attr;
1073 /* set up files for this cpu device */
1074 drv_attr = cpufreq_driver->attr;
1075 while (drv_attr && *drv_attr) {
1076 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1081 if (cpufreq_driver->get) {
1082 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1087 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1091 if (cpufreq_driver->bios_limit) {
1092 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1097 if (cpufreq_boost_supported()) {
1098 ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
1106 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1108 struct cpufreq_governor *gov = NULL;
1109 unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1113 /* Update policy governor to the one used before hotplug. */
1114 gov = get_governor(policy->last_governor);
1116 pr_debug("Restoring governor %s for cpu %d\n",
1117 gov->name, policy->cpu);
1119 gov = get_governor(default_governor);
1123 gov = cpufreq_default_governor();
1124 __module_get(gov->owner);
1129 /* Use the default policy if there is no last_policy. */
1130 if (policy->last_policy) {
1131 pol = policy->last_policy;
1133 pol = cpufreq_parse_policy(default_governor);
1135 * In case the default governor is neither "performance"
1136 * nor "powersave", fall back to the initial policy
1137 * value set by the driver.
1139 if (pol == CPUFREQ_POLICY_UNKNOWN)
1140 pol = policy->policy;
1142 if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1143 pol != CPUFREQ_POLICY_POWERSAVE)
1147 ret = cpufreq_set_policy(policy, gov, pol);
1149 module_put(gov->owner);
1154 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1158 /* Has this CPU been taken care of already? */
1159 if (cpumask_test_cpu(cpu, policy->cpus))
1162 down_write(&policy->rwsem);
1164 cpufreq_stop_governor(policy);
1166 cpumask_set_cpu(cpu, policy->cpus);
1169 ret = cpufreq_start_governor(policy);
1171 pr_err("%s: Failed to start governor\n", __func__);
1173 up_write(&policy->rwsem);
1177 void refresh_frequency_limits(struct cpufreq_policy *policy)
1179 if (!policy_is_inactive(policy)) {
1180 pr_debug("updating policy for CPU %u\n", policy->cpu);
1182 cpufreq_set_policy(policy, policy->governor, policy->policy);
1185 EXPORT_SYMBOL(refresh_frequency_limits);
1187 static void handle_update(struct work_struct *work)
1189 struct cpufreq_policy *policy =
1190 container_of(work, struct cpufreq_policy, update);
1192 pr_debug("handle_update for cpu %u called\n", policy->cpu);
1193 down_write(&policy->rwsem);
1194 refresh_frequency_limits(policy);
1195 up_write(&policy->rwsem);
1198 static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1201 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1203 schedule_work(&policy->update);
1207 static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1210 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1212 schedule_work(&policy->update);
1216 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1218 struct kobject *kobj;
1219 struct completion *cmp;
1221 down_write(&policy->rwsem);
1222 cpufreq_stats_free_table(policy);
1223 kobj = &policy->kobj;
1224 cmp = &policy->kobj_unregister;
1225 up_write(&policy->rwsem);
1229 * We need to make sure that the underlying kobj is
1230 * actually not referenced anymore by anybody before we
1231 * proceed with unloading.
1233 pr_debug("waiting for dropping of refcount\n");
1234 wait_for_completion(cmp);
1235 pr_debug("wait complete\n");
1238 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1240 struct cpufreq_policy *policy;
1241 struct device *dev = get_cpu_device(cpu);
1247 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1251 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1252 goto err_free_policy;
1254 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1255 goto err_free_cpumask;
1257 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1258 goto err_free_rcpumask;
1260 init_completion(&policy->kobj_unregister);
1261 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1262 cpufreq_global_kobject, "policy%u", cpu);
1264 dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1266 * The entire policy object will be freed below, but the extra
1267 * memory allocated for the kobject name needs to be freed by
1268 * releasing the kobject.
1270 kobject_put(&policy->kobj);
1271 goto err_free_real_cpus;
1274 freq_constraints_init(&policy->constraints);
1276 policy->nb_min.notifier_call = cpufreq_notifier_min;
1277 policy->nb_max.notifier_call = cpufreq_notifier_max;
1279 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1282 dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
1284 goto err_kobj_remove;
1287 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1290 dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
1292 goto err_min_qos_notifier;
1295 INIT_LIST_HEAD(&policy->policy_list);
1296 init_rwsem(&policy->rwsem);
1297 spin_lock_init(&policy->transition_lock);
1298 init_waitqueue_head(&policy->transition_wait);
1299 INIT_WORK(&policy->update, handle_update);
1304 err_min_qos_notifier:
1305 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1308 cpufreq_policy_put_kobj(policy);
1310 free_cpumask_var(policy->real_cpus);
1312 free_cpumask_var(policy->related_cpus);
1314 free_cpumask_var(policy->cpus);
1321 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1323 unsigned long flags;
1327 * The callers must ensure the policy is inactive by now, to avoid any
1328 * races with show()/store() callbacks.
1330 if (unlikely(!policy_is_inactive(policy)))
1331 pr_warn("%s: Freeing active policy\n", __func__);
1333 /* Remove policy from list */
1334 write_lock_irqsave(&cpufreq_driver_lock, flags);
1335 list_del(&policy->policy_list);
1337 for_each_cpu(cpu, policy->related_cpus)
1338 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1339 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1341 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1343 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1346 /* Cancel any pending policy->update work before freeing the policy. */
1347 cancel_work_sync(&policy->update);
1349 if (policy->max_freq_req) {
1351 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
1352 * notification, since CPUFREQ_CREATE_POLICY notification was
1353 * sent after adding max_freq_req earlier.
1355 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1356 CPUFREQ_REMOVE_POLICY, policy);
1357 freq_qos_remove_request(policy->max_freq_req);
1360 freq_qos_remove_request(policy->min_freq_req);
1361 kfree(policy->min_freq_req);
1363 cpufreq_policy_put_kobj(policy);
1364 free_cpumask_var(policy->real_cpus);
1365 free_cpumask_var(policy->related_cpus);
1366 free_cpumask_var(policy->cpus);
1370 static int cpufreq_online(unsigned int cpu)
1372 struct cpufreq_policy *policy;
1374 unsigned long flags;
1378 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1380 /* Check if this CPU already has a policy to manage it */
1381 policy = per_cpu(cpufreq_cpu_data, cpu);
1383 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1384 if (!policy_is_inactive(policy))
1385 return cpufreq_add_policy_cpu(policy, cpu);
1387 /* This is the only online CPU for the policy. Start over. */
1389 down_write(&policy->rwsem);
1391 policy->governor = NULL;
1394 policy = cpufreq_policy_alloc(cpu);
1397 down_write(&policy->rwsem);
1400 if (!new_policy && cpufreq_driver->online) {
1401 /* Recover policy->cpus using related_cpus */
1402 cpumask_copy(policy->cpus, policy->related_cpus);
1404 ret = cpufreq_driver->online(policy);
1406 pr_debug("%s: %d: initialization failed\n", __func__,
1408 goto out_exit_policy;
1411 cpumask_copy(policy->cpus, cpumask_of(cpu));
1414 * Call the driver. From then on it must be able to accept all
1415 * calls to ->verify and ->setpolicy for this CPU.
1417 ret = cpufreq_driver->init(policy);
1419 pr_debug("%s: %d: initialization failed\n", __func__,
1421 goto out_free_policy;
1424 /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
1425 policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
1428 * The initialization has succeeded and the policy is online.
1429 * If there is a problem with its frequency table, take it
1430 * offline and drop it.
1432 ret = cpufreq_table_validate_and_sort(policy);
1434 goto out_offline_policy;
1436 /* related_cpus should at least include policy->cpus. */
1437 cpumask_copy(policy->related_cpus, policy->cpus);
1441 * The affected CPUs must always be the ones that are online. We aren't
1442 * managing offline CPUs here.
1444 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1447 for_each_cpu(j, policy->related_cpus) {
1448 per_cpu(cpufreq_cpu_data, j) = policy;
1449 add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1452 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1454 if (!policy->min_freq_req) {
1456 goto out_destroy_policy;
1459 ret = freq_qos_add_request(&policy->constraints,
1460 policy->min_freq_req, FREQ_QOS_MIN,
1461 FREQ_QOS_MIN_DEFAULT_VALUE);
1464 * So we don't call freq_qos_remove_request() for an
1465 * uninitialized request.
1467 kfree(policy->min_freq_req);
1468 policy->min_freq_req = NULL;
1469 goto out_destroy_policy;
1473 * This must be initialized right here to avoid calling
1474 * freq_qos_remove_request() on an uninitialized request in case
1475 * of errors.
1477 policy->max_freq_req = policy->min_freq_req + 1;
1479 ret = freq_qos_add_request(&policy->constraints,
1480 policy->max_freq_req, FREQ_QOS_MAX,
1481 FREQ_QOS_MAX_DEFAULT_VALUE);
1483 policy->max_freq_req = NULL;
1484 goto out_destroy_policy;
1487 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1488 CPUFREQ_CREATE_POLICY, policy);
1491 if (cpufreq_driver->get && has_target()) {
1492 policy->cur = cpufreq_driver->get(policy->cpu);
1495 pr_err("%s: ->get() failed\n", __func__);
1496 goto out_destroy_policy;
1501 * Sometimes boot loaders set the CPU frequency to a value outside of
1502 * the frequency table known to the cpufreq core. In such cases the CPU
1503 * might be unstable if it has to run at that frequency for a long
1504 * duration, so it's better to set it to a frequency which is specified
1505 * in the frequency table. This also keeps cpufreq stats consistent, as
1506 * cpufreq-stats would otherwise fail to register because the current
1507 * frequency of the CPU isn't found in the frequency table.
1509 * Because we don't want this change to affect the boot process badly,
1510 * we go for the next frequency which is >= policy->cur ('cur' must be
1511 * set by now, otherwise we would end up setting the frequency to the
1512 * lowest entry of the table, as 'cur' is initialized to zero).
1514 * We pass the target frequency as "policy->cur - 1", as otherwise
1515 * __cpufreq_driver_target() would simply return without doing anything,
1516 * since policy->cur is already equal to the target frequency.
1518 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1520 unsigned int old_freq = policy->cur;
1522 /* Are we running at an unknown frequency? */
1523 ret = cpufreq_frequency_table_get_index(policy, old_freq);
1524 if (ret == -EINVAL) {
1525 ret = __cpufreq_driver_target(policy, old_freq - 1,
1526 CPUFREQ_RELATION_L);
1529 * Reaching here a few seconds after boot does not
1530 * mean that the system will remain stable at the "unknown"
1531 * frequency for a longer duration. Hence, a BUG_ON().
1534 pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
1535 __func__, policy->cpu, old_freq, policy->cur);
1540 ret = cpufreq_add_dev_interface(policy);
1542 goto out_destroy_policy;
1544 cpufreq_stats_create_table(policy);
1546 write_lock_irqsave(&cpufreq_driver_lock, flags);
1547 list_add(&policy->policy_list, &cpufreq_policy_list);
1548 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1551 * Register with the energy model before
1552 * sugov_eas_rebuild_sd() is called, which will result
1553 * in rebuilding of the sched domains, which should only be done
1554 * once the energy model is properly initialized for the policy
1555 * first.
1557 * Also, this should be done before the policy is registered
1558 * with the cooling framework.
1560 if (cpufreq_driver->register_em)
1561 cpufreq_driver->register_em(policy);
1564 ret = cpufreq_init_policy(policy);
1566 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1567 __func__, cpu, ret);
1568 goto out_destroy_policy;
1571 up_write(&policy->rwsem);
1573 kobject_uevent(&policy->kobj, KOBJ_ADD);
1575 /* Callback for handling stuff after policy is ready */
1576 if (cpufreq_driver->ready)
1577 cpufreq_driver->ready(policy);
1579 /* Register cpufreq cooling only for a new policy */
1580 if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
1581 policy->cdev = of_cpufreq_cooling_register(policy);
1583 pr_debug("initialization complete\n");
1588 for_each_cpu(j, policy->real_cpus)
1589 remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
1592 if (cpufreq_driver->offline)
1593 cpufreq_driver->offline(policy);
1596 if (cpufreq_driver->exit)
1597 cpufreq_driver->exit(policy);
1600 cpumask_clear(policy->cpus);
1601 up_write(&policy->rwsem);
1603 cpufreq_policy_free(policy);
1608 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1609 * @dev: CPU device.
1610 * @sif: Subsystem interface structure pointer (not used)
1612 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1614 struct cpufreq_policy *policy;
1615 unsigned cpu = dev->id;
1618 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1620 if (cpu_online(cpu)) {
1621 ret = cpufreq_online(cpu);
1626 /* Create sysfs link on CPU registration */
1627 policy = per_cpu(cpufreq_cpu_data, cpu);
1629 add_cpu_dev_symlink(policy, cpu, dev);
1634 static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
1639 cpufreq_stop_governor(policy);
1641 cpumask_clear_cpu(cpu, policy->cpus);
1643 if (!policy_is_inactive(policy)) {
1644 /* Nominate a new CPU if necessary. */
1645 if (cpu == policy->cpu)
1646 policy->cpu = cpumask_any(policy->cpus);
1648 /* Start the governor again for the active policy. */
1650 ret = cpufreq_start_governor(policy);
1652 pr_err("%s: Failed to start governor\n", __func__);
1659 strscpy(policy->last_governor, policy->governor->name,
1662 policy->last_policy = policy->policy;
1665 cpufreq_exit_governor(policy);
1668 * Perform the ->offline() during light-weight tear-down, as
1669 * that allows fast recovery when the CPU comes back.
1671 if (cpufreq_driver->offline) {
1672 cpufreq_driver->offline(policy);
1673 } else if (cpufreq_driver->exit) {
1674 cpufreq_driver->exit(policy);
1675 policy->freq_table = NULL;
1679 static int cpufreq_offline(unsigned int cpu)
1681 struct cpufreq_policy *policy;
1683 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1685 policy = cpufreq_cpu_get_raw(cpu);
1687 pr_debug("%s: No cpu_data found\n", __func__);
1691 down_write(&policy->rwsem);
1693 __cpufreq_offline(cpu, policy);
1695 up_write(&policy->rwsem);
1700 * cpufreq_remove_dev - remove a CPU device
1702 * Removes the cpufreq interface for a CPU device.
1704 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1706 unsigned int cpu = dev->id;
1707 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1712 down_write(&policy->rwsem);
1714 if (cpu_online(cpu))
1715 __cpufreq_offline(cpu, policy);
1717 remove_cpu_dev_symlink(policy, cpu, dev);
1719 if (!cpumask_empty(policy->real_cpus)) {
1720 up_write(&policy->rwsem);
1725 * Unregister cpufreq cooling once all the CPUs of the policy are
1726 * removed.
1728 if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1729 cpufreq_cooling_unregister(policy->cdev);
1730 policy->cdev = NULL;
1733 /* We did light-weight exit earlier, do full tear down now */
1734 if (cpufreq_driver->offline)
1735 cpufreq_driver->exit(policy);
1737 up_write(&policy->rwsem);
1739 cpufreq_policy_free(policy);
1743 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1744 * @policy: Policy managing CPUs.
1745 * @new_freq: New CPU frequency.
1747 * Adjust to the current frequency first and clean up later by either calling
1748 * cpufreq_update_policy(), or scheduling handle_update().
1750 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1751 unsigned int new_freq)
1753 struct cpufreq_freqs freqs;
1755 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1756 policy->cur, new_freq);
1758 freqs.old = policy->cur;
1759 freqs.new = new_freq;
1761 cpufreq_freq_transition_begin(policy, &freqs);
1762 cpufreq_freq_transition_end(policy, &freqs, 0);
1765 static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1767 unsigned int new_freq;
1769 new_freq = cpufreq_driver->get(policy->cpu);
1774 * If fast frequency switching is used with the given policy, the check
1775 * against policy->cur is pointless, so skip it in that case.
1777 if (policy->fast_switch_enabled || !has_target())
1780 if (policy->cur != new_freq) {
1782 * For some platforms, the frequency returned by hardware may be
1783 * slightly different from what is provided in the frequency
1784 * table, for example hardware may return 499 MHz instead of 500
1785 * MHz. In such cases it is better to avoid getting into
1786 * unnecessary frequency updates.
1788 if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
1791 cpufreq_out_of_sync(policy, new_freq);
1793 schedule_work(&policy->update);
1800 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1801 * @cpu: CPU number.
1803 * This is the last known frequency, without actually getting it from the driver.
1804 * The return value is the same as what is shown in scaling_cur_freq in sysfs.
1806 unsigned int cpufreq_quick_get(unsigned int cpu)
1808 struct cpufreq_policy *policy;
1809 unsigned int ret_freq = 0;
1810 unsigned long flags;
1812 read_lock_irqsave(&cpufreq_driver_lock, flags);
1814 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1815 ret_freq = cpufreq_driver->get(cpu);
1816 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1820 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1822 policy = cpufreq_cpu_get(cpu);
1824 ret_freq = policy->cur;
1825 cpufreq_cpu_put(policy);
1830 EXPORT_SYMBOL(cpufreq_quick_get);
1833 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1834 * @cpu: CPU number.
1836 * Just return the max possible frequency for a given CPU.
1838 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1840 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1841 unsigned int ret_freq = 0;
1844 ret_freq = policy->max;
1845 cpufreq_cpu_put(policy);
1850 EXPORT_SYMBOL(cpufreq_quick_get_max);
1853 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1854 * @cpu: CPU number.
1856 * The default return value is the max_freq field of cpuinfo.
1858 __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1860 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1861 unsigned int ret_freq = 0;
1864 ret_freq = policy->cpuinfo.max_freq;
1865 cpufreq_cpu_put(policy);
1870 EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1872 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1874 if (unlikely(policy_is_inactive(policy)))
1877 return cpufreq_verify_current_freq(policy, true);
1881 * cpufreq_get - get the current CPU frequency (in kHz)
1882 * @cpu: CPU number.
1884 * Get the current (static) frequency of the CPU.
1886 unsigned int cpufreq_get(unsigned int cpu)
1888 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1889 unsigned int ret_freq = 0;
1892 down_read(&policy->rwsem);
1893 if (cpufreq_driver->get)
1894 ret_freq = __cpufreq_get(policy);
1895 up_read(&policy->rwsem);
1897 cpufreq_cpu_put(policy);
1902 EXPORT_SYMBOL(cpufreq_get);
1904 static struct subsys_interface cpufreq_interface = {
1906 .subsys = &cpu_subsys,
1907 .add_dev = cpufreq_add_dev,
1908 .remove_dev = cpufreq_remove_dev,
1912 * In case the platform wants some specific frequency to be configured
1913 * during suspend.
1915 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1919 if (!policy->suspend_freq) {
1920 pr_debug("%s: suspend_freq not defined\n", __func__);
1924 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1925 policy->suspend_freq);
1927 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1928 CPUFREQ_RELATION_H);
1930 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1931 __func__, policy->suspend_freq, ret);
1935 EXPORT_SYMBOL(cpufreq_generic_suspend);
1938 * cpufreq_suspend() - Suspend CPUFreq governors.
1940 * Called during system-wide suspend/hibernate cycles for suspending
1941 * governors, as some platforms can't change the frequency after this point
1942 * in the suspend cycle: the devices (e.g. I2C controllers and regulators)
1943 * they use for changing the frequency are suspended soon afterwards.
1945 void cpufreq_suspend(void)
1947 struct cpufreq_policy *policy;
1949 if (!cpufreq_driver)
1952 if (!has_target() && !cpufreq_driver->suspend)
1955 pr_debug("%s: Suspending Governors\n", __func__);
1957 for_each_active_policy(policy) {
1959 down_write(&policy->rwsem);
1960 cpufreq_stop_governor(policy);
1961 up_write(&policy->rwsem);
1964 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1965 pr_err("%s: Failed to suspend driver: %s\n", __func__,
1966 cpufreq_driver->name);
1970 cpufreq_suspended = true;
1974 * cpufreq_resume() - Resume CPUFreq governors.
1976 * Called during system-wide suspend/hibernate cycles for resuming governors that
1977 * are suspended with cpufreq_suspend().
1979 void cpufreq_resume(void)
1981 struct cpufreq_policy *policy;
1984 if (!cpufreq_driver)
1987 if (unlikely(!cpufreq_suspended))
1990 cpufreq_suspended = false;
1992 if (!has_target() && !cpufreq_driver->resume)
1995 pr_debug("%s: Resuming Governors\n", __func__);
1997 for_each_active_policy(policy) {
1998 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1999 pr_err("%s: Failed to resume driver: %s\n", __func__,
2000 cpufreq_driver->name);
2001 } else if (has_target()) {
2002 down_write(&policy->rwsem);
2003 ret = cpufreq_start_governor(policy);
2004 up_write(&policy->rwsem);
2007 pr_err("%s: Failed to start governor for CPU%u's policy\n",
2008 __func__, policy->cpu);
2014 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
2015 * @flags: Flags to test against the current cpufreq driver's flags.
2017 * Assumes that the driver is there, so callers must ensure that this is the
2018 * case.
2020 bool cpufreq_driver_test_flags(u16 flags)
2022 return !!(cpufreq_driver->flags & flags);
2026 * cpufreq_get_current_driver - Return the current driver's name.
2028 * Return the name string of the currently registered cpufreq driver or NULL if
2029 * none.
2031 const char *cpufreq_get_current_driver(void)
2034 return cpufreq_driver->name;
2038 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
2041 * cpufreq_get_driver_data - Return current driver data.
2043 * Return the private data of the currently registered cpufreq driver, or NULL
2044 * if no cpufreq driver has been registered.
2046 void *cpufreq_get_driver_data(void)
2049 return cpufreq_driver->driver_data;
2053 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
2055 /*********************************************************************
2056 * NOTIFIER LISTS INTERFACE *
2057 *********************************************************************/
2060 * cpufreq_register_notifier - Register a notifier with cpufreq.
2061 * @nb: notifier function to register.
2062 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2064 * Add a notifier to one of two lists: either a list of notifiers that run on
2065 * clock rate changes (once before and once after every transition), or a list
2066 * of notifiers that run on cpufreq policy changes.
2068 * This function may sleep and it has the same return values as
2069 * blocking_notifier_chain_register().
2071 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
2075 if (cpufreq_disabled())
2079 case CPUFREQ_TRANSITION_NOTIFIER:
2080 mutex_lock(&cpufreq_fast_switch_lock);
2082 if (cpufreq_fast_switch_count > 0) {
2083 mutex_unlock(&cpufreq_fast_switch_lock);
2086 ret = srcu_notifier_chain_register(
2087 &cpufreq_transition_notifier_list, nb);
2089 cpufreq_fast_switch_count--;
2091 mutex_unlock(&cpufreq_fast_switch_lock);
2093 case CPUFREQ_POLICY_NOTIFIER:
2094 ret = blocking_notifier_chain_register(
2095 &cpufreq_policy_notifier_list, nb);
2103 EXPORT_SYMBOL(cpufreq_register_notifier);
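/*
 * Registration sketch (the foo_* names are hypothetical):
 *
 *	static int foo_transition_cb(struct notifier_block *nb,
 *				     unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			foo_rescale(freqs->policy->cpu, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * Note that a registered transition notifier prevents fast frequency
 * switching from being enabled system-wide, see cpufreq_enable_fast_switch().
 */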
2106 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2107 * @nb: notifier block to be unregistered.
2108 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2110 * Remove a notifier from one of the cpufreq notifier lists.
2112 * This function may sleep and it has the same return values as
2113 * blocking_notifier_chain_unregister().
2115 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2119 if (cpufreq_disabled())
2123 case CPUFREQ_TRANSITION_NOTIFIER:
2124 mutex_lock(&cpufreq_fast_switch_lock);
2126 ret = srcu_notifier_chain_unregister(
2127 &cpufreq_transition_notifier_list, nb);
2128 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2129 cpufreq_fast_switch_count++;
2131 mutex_unlock(&cpufreq_fast_switch_lock);
2133 case CPUFREQ_POLICY_NOTIFIER:
2134 ret = blocking_notifier_chain_unregister(
2135 &cpufreq_policy_notifier_list, nb);
2143 EXPORT_SYMBOL(cpufreq_unregister_notifier);
2146 /*********************************************************************
2148 *********************************************************************/
2151 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2152 * @policy: cpufreq policy to switch the frequency for.
2153 * @target_freq: New frequency to set (may be approximate).
2155 * Carry out a fast frequency switch without sleeping.
2157 * The driver's ->fast_switch() callback invoked by this function must be
2158 * suitable for being called from within RCU-sched read-side critical sections
2159 * and it is expected to select the minimum available frequency greater than or
2160 * equal to @target_freq (CPUFREQ_RELATION_L).
2162 * This function must not be called if policy->fast_switch_enabled is unset.
2164 * Governors calling this function must guarantee that it will never be invoked
2165 * twice in parallel for the same policy and that it will never be called in
2166 * parallel with either ->target() or ->target_index() for the same policy.
2168 * Returns the actual frequency set for the CPU.
2170 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2171 * error condition, the hardware configuration must be preserved.
2173 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2174 unsigned int target_freq)
2179 target_freq = clamp_val(target_freq, policy->min, policy->max);
2180 freq = cpufreq_driver->fast_switch(policy, target_freq);
2186 arch_set_freq_scale(policy->related_cpus, freq,
2187 arch_scale_freq_ref(policy->cpu));
2188 cpufreq_stats_record_transition(policy, freq);
2190 if (trace_cpu_frequency_enabled()) {
2191 for_each_cpu(cpu, policy->cpus)
2192 trace_cpu_frequency(freq, cpu);
2197 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
2200 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2201 * @cpu: Target CPU.
2202 * @min_perf: Minimum (required) performance level (units of @capacity).
2203 * @target_perf: Target (desired) performance level (units of @capacity).
2204 * @capacity: Capacity of the target CPU.
2206 * Carry out a fast performance level switch of @cpu without sleeping.
2208 * The driver's ->adjust_perf() callback invoked by this function must be
2209 * suitable for being called from within RCU-sched read-side critical sections
2210 * and it is expected to select a suitable performance level equal to or above
2211 * @min_perf and preferably equal to or below @target_perf.
2213 * This function must not be called if policy->fast_switch_enabled is unset.
2215 * Governors calling this function must guarantee that it will never be invoked
2216 * twice in parallel for the same CPU and that it will never be called in
2217 * parallel with either ->target() or ->target_index() or ->fast_switch() for
2218 * the same CPU.
2220 void cpufreq_driver_adjust_perf(unsigned int cpu,
2221 unsigned long min_perf,
2222 unsigned long target_perf,
2223 unsigned long capacity)
2225 cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2229 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2231 * Return 'true' if the ->adjust_perf callback is present for the
2232 * current driver or 'false' otherwise.
2234 bool cpufreq_driver_has_adjust_perf(void)
2236 return !!cpufreq_driver->adjust_perf;
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int restore_freq, intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	restore_freq = policy->cur;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	target_freq = __resolve_freq(policy, target_freq, relation);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call, as the frequency is checked
	 * again after finding the index. But it is left intentionally for
	 * cases where exactly the same frequency is requested again, so that
	 * a few function calls can be saved.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	if (cpufreq_driver->target) {
		/*
		 * If the driver hasn't setup a single inefficient frequency,
		 * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
		 */
		if (!policy->efficiencies_available)
			relation &= ~CPUFREQ_RELATION_E;

		return cpufreq_driver->target(policy, target_freq, relation);
	}

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	return __target_index(policy, policy->cached_resolved_idx);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
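
/*
 * Illustrative sketch (not part of the original file): a sleepable caller
 * requesting the lowest available frequency at or above a target, as the
 * conventional governors do. The foo_* name is hypothetical.
 */
#if 0	/* example only */
static int foo_set_min_suitable_freq(struct cpufreq_policy *policy,
				     unsigned int khz)
{
	/* Takes policy->rwsem and may sleep; not for scheduler context. */
	return cpufreq_driver_target(policy, khz, CPUFREQ_RELATION_L);
}
#endif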
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}
static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}
void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
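
/*
 * Illustrative sketch (not part of the original file): the registration
 * boilerplate of a minimal modular governor. The foo_* names are
 * hypothetical; a real governor would also provide ->start()/->stop()
 * or ->limits() callbacks.
 */
#if 0	/* example only */
static struct cpufreq_governor foo_governor = {
	.name	= "foo",
	.owner	= THIS_MODULE,
};

static int __init foo_governor_init(void)
{
	/* Fails with -EBUSY if a governor named "foo" already exists. */
	return cpufreq_register_governor(&foo_governor);
}

static void __exit foo_governor_exit(void)
{
	cpufreq_unregister_governor(&foo_governor);
}

module_init(foo_governor_init);
module_exit(foo_governor_exit);
#endif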
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
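
/*
 * Illustrative sketch (not part of the original file): taking a snapshot
 * of a CPU's policy to read its limits without holding any cpufreq locks
 * afterwards. The foo_* name is hypothetical; a real caller might prefer
 * to allocate the snapshot, as struct cpufreq_policy is large.
 */
#if 0	/* example only */
static void foo_print_limits(unsigned int cpu)
{
	struct cpufreq_policy snapshot;

	/* Returns 0 on success, -EINVAL if no policy exists for @cpu. */
	if (!cpufreq_get_policy(&snapshot, cpu))
		pr_info("cpu%u: %u - %u kHz\n", cpu,
			snapshot.min, snapshot.max);
}
#endif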
/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present) or
 * carry out a governor update for @policy. That is, run the current governor's
 * ->limits() callback (if @new_gov points to the same object as the one in
 * @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * PM QoS framework collects all the requests from users and provide us
	 * the final aggregated value here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and make
	 * sure that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	/*
	 * Resolve policy min/max to available frequencies, so that the
	 * resolved frequencies neither overshoot the requested maximum nor
	 * undershoot the requested minimum.
	 */
	policy->min = new_data.min;
	policy->max = new_data.max;
	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
 * for the policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
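
/*
 * Illustrative sketch (not part of the original file): a driver reacting
 * to a platform notification (e.g. firmware changed the allowed range) by
 * asking the core to re-evaluate the limits. The foo_* name is
 * hypothetical.
 */
#if 0	/* example only */
static void foo_driver_limits_notify(unsigned int cpu)
{
	/* Falls back to cpufreq_update_policy() without ->update_limits. */
	cpufreq_update_limits(cpu);
}
#endif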
/*********************************************************************
 *                               BOOST                               *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}
int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_lock();
	for_each_active_policy(policy) {
		policy->boost_enabled = state;
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret) {
			policy->boost_enabled = !policy->boost_enabled;
			goto err_reset_state;
		}
	}
	cpus_read_unlock();

	return 0;

err_reset_state:
	cpus_read_unlock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
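
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * frequency table contains turbo entries opting in to the software boost
 * fallback from its ->init() path. The foo_* name is hypothetical.
 */
#if 0	/* example only */
static int foo_cpufreq_policy_init(struct cpufreq_policy *policy)
{
	/* Installs cpufreq_boost_set_sw unless the driver has ->set_boost. */
	return cpufreq_enable_boost_support();
}
#endif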
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structure, make sure they are available before proceeding further.
	 */
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline) ||
	     (driver_data->adjust_perf && !driver_data->fast_switch))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Mark support for the scheduler's frequency invariance engine for
	 * drivers that implement target(), target_index() or fast_switch().
	 */
	if (!cpufreq_driver->setpolicy) {
		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
		pr_debug("supports frequency invariance");
	}

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (unlikely(list_empty(&cpufreq_policy_list))) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
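
/*
 * Illustrative sketch (not part of the original file): the smallest driver
 * shape that passes the callback consistency checks above: ->init(),
 * ->verify() and exactly one of ->target()/->target_index()/->setpolicy().
 * All foo_* names are hypothetical.
 */
#if 0	/* example only */
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* A real driver would set policy->freq_table here. */
	return 0;
}

static int foo_cpufreq_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
{
	/* Program the hardware for policy->freq_table[index]. */
	return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	.init		= foo_cpufreq_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_cpufreq_target_index,
};

static int __init foo_cpufreq_register(void)
{
	/* -EEXIST if another driver won the race, -EPROBE_DEFER if too early */
	return cpufreq_register_driver(&foo_cpufreq_driver);
}

static void __exit foo_cpufreq_unregister(void)
{
	cpufreq_unregister_driver(&foo_cpufreq_driver);
}

module_init(foo_cpufreq_register);
module_exit(foo_cpufreq_unregister);
#endif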
/*
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 */
void cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
		return;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	struct cpufreq_governor *gov = cpufreq_default_governor();
	struct device *dev_root;

	if (cpufreq_disabled())
		return -ENODEV;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
		put_device(dev_root);
	}
	BUG_ON(!cpufreq_global_kobject);

	if (!strlen(default_governor))
		strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

	return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);