// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <linux/units.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)

#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];
54 * The "cpufreq driver" - the arch- or hardware-dependent low
55 * level driver of CPUFreq support, and its spinlock. This lock
56 * also protects the cpufreq_cpu_data array.
58 static struct cpufreq_driver *cpufreq_driver;
59 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
60 static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;

	return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
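
/*
 * Illustrative sketch (not part of this file): a sampling governor can call
 * get_cpu_idle_time() twice and derive a busy percentage from the deltas.
 * All variable names below are hypothetical:
 *
 *	u64 wall_prev, wall_now, idle_prev, idle_now, busy_pct;
 *
 *	idle_prev = get_cpu_idle_time(cpu, &wall_prev, 0);
 *	...sleep for one sampling period...
 *	idle_now = get_cpu_idle_time(cpu, &wall_now, 0);
 *	busy_pct = 100 * (wall_now - wall_prev - (idle_now - idle_prev)) /
 *		   (wall_now - wall_prev);
 */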

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
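
/*
 * Illustrative sketch (hypothetical driver, not from this file): a clk-based
 * driver whose CPUs all share one clock could wire this up in its ->init()
 * callback, with cpufreq_generic_get() as the ->get() callback:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;	// hypothetical driver-private clock
 *		cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
 *		return 0;
 *	}
 */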

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy.  Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
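
/*
 * Example of the get/put pairing (illustrative only): code inspecting a
 * policy from outside the core must balance the reference it takes:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u: last requested %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */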

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - Adjust the system "loops_per_jiffy".
 * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 * @ci: Frequency change information.
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy whose CPUs are affected by the transition.
 * @freqs: contains details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and adjust_jiffies().
 *
 * It is called twice on all CPU frequency changes that have external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}
395 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
396 struct cpufreq_freqs *freqs, int transition_failed)
398 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
399 if (!transition_failed)
402 swap(freqs->old, freqs->new);
403 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
404 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    policy->cpuinfo.max_freq);

	spin_lock(&policy->transition_lock);
	policy->transition_ongoing = false;
	policy->transition_task = NULL;
	spin_unlock(&policy->transition_lock);

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
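
/*
 * Illustrative sketch (hypothetical driver): a driver that announces its own
 * transitions would bracket the actual hardware write like this:
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = next_freq };
 *	int err;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	err = foo_write_freq_to_hw(next_freq);	// hypothetical helper
 *	cpufreq_freq_transition_end(policy, &freqs, err);
 *
 * Drivers using ->target_index() without CPUFREQ_ASYNC_NOTIFICATION get this
 * bracketing from the core (see __target_index() below) and must not repeat
 * it themselves.
 */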

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

static unsigned int __resolve_freq(struct cpufreq_policy *policy,
		unsigned int target_freq, unsigned int relation)
{
	unsigned int idx;

	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (!policy->freq_table)
		return target_freq;

	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
	policy->cached_resolved_idx = idx;
	policy->cached_target_freq = target_freq;
	return policy->freq_table[idx].frequency;
}

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 *                               one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
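
/*
 * Example (sketch, with hypothetical governor-internal values): a governor
 * that computes a raw frequency from utilization can map it to a real table
 * entry before requesting a switch:
 *
 *	unsigned int raw_freq = util * policy->cpuinfo.max_freq / max_util;
 *	unsigned int freq = cpufreq_driver_resolve_freq(policy, raw_freq);
 */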

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
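
/*
 * Worked example of the fallback above, with LATENCY_MULTIPLIER being 1000:
 * a cpuinfo.transition_latency of 20000 ns gives latency = 20 us, so the
 * result is min(20 * 1000, 10000) = 10000 us (the 10 ms cap), while a
 * 5000 ns latency yields 5 * 1000 = 5000 us.
 */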

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}

/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}

	/* Remove the extra space at the end */
	i--;

	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
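
/*
 * Example: for a policy spanning CPUs 0-3 this emits "0 1 2 3\n"; it is what
 * the related_cpus and affected_cpus sysfs attributes below are built on.
 */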

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *cpufreq_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
ATTRIBUTE_GROUPS(cpufreq);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EBUSY;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	if (likely(!policy_is_inactive(policy)))
		ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EBUSY;

	if (!fattr->store)
		return -EIO;

	down_write(&policy->rwsem);
	if (likely(!policy_is_inactive(policy)))
		ret = fattr->store(policy, buf, count);
	up_write(&policy->rwsem);

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_groups	= cpufreq_groups,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
				struct device *dev)
{
	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
	cpumask_clear_cpu(cpu, policy->real_cpus);
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {

		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	init_completion(&policy->kobj_unregister);
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/*
	 * The callers must ensure the policy is inactive by now, to avoid any
	 * races with show()/store() callbacks.
	 */
	if (unlikely(!policy_is_inactive(policy)))
		pr_warn("%s: Freeing active policy\n", __func__);

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
		 * notification, since CPUFREQ_CREATE_POLICY notification was
		 * sent after adding max_freq_req earlier.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy. Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
		down_write(&policy->rwsem);
	}

	if (!new_policy && cpufreq_driver->online) {
		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);

		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		/*
		 * The initialization has succeeded and the policy is online.
		 * If there is a problem with its frequency table, take it
		 * offline and drop it.
		 */
		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_offline_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req) {
			ret = -ENOMEM;
			goto out_destroy_policy;
		}

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   FREQ_QOS_MIN_DEFAULT_VALUE);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on uninitialized request in case
		 * of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			ret = -EIO;
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration, so it's better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		/*
		 * Register with the energy model before
		 * sched_cpufreq_governor_change() is called, which will result
		 * in rebuilding of the sched domains, which should only be done
		 * once the energy model is properly initialized for the policy
		 * first.
		 *
		 * Also, this should be called before the policy is registered
		 * with cooling framework.
		 */
		if (cpufreq_driver->register_em)
			cpufreq_driver->register_em(policy);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	/* Register cpufreq cooling only for a new policy */
	if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, j, get_cpu_device(j));

out_offline_policy:
	if (cpufreq_driver->offline)
		cpufreq_driver->offline(policy);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpumask_clear(policy->cpus);
	up_write(&policy->rwsem);

	cpufreq_policy_free(policy);
	return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu, dev);

	return 0;
}

static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
{
	int ret;

	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (!policy_is_inactive(policy)) {
		/* Nominate a new CPU if necessary. */
		if (cpu == policy->cpu)
			policy->cpu = cpumask_any(policy->cpus);

		/* Start the governor again for the active policy. */
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		return;
	}

	if (has_target())
		strncpy(policy->last_governor, policy->governor->name,
			CPUFREQ_NAME_LEN);
	else
		policy->last_policy = policy->policy;

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);

	__cpufreq_offline(cpu, policy);

	up_write(&policy->rwsem);

	return 0;
}

/*
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (cpu_online(cpu))
		__cpufreq_offline(cpu, policy);

	remove_cpu_dev_symlink(policy, cpu, dev);

	if (!cpumask_empty(policy->real_cpus)) {
		up_write(&policy->rwsem);
		return;
	}

	/*
	 * Unregister cpufreq cooling once all the CPUs of the policy are
	 * removed.
	 */
	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	/* We did light-weight exit earlier, do full tear down now */
	if (cpufreq_driver->offline)
		cpufreq_driver->exit(policy);

	up_write(&policy->rwsem);

	cpufreq_policy_free(policy);
}

/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: New CPU frequency.
 *
 * Adjust to the current frequency first and clean up later by either calling
 * cpufreq_update_policy(), or scheduling handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		/*
		 * For some platforms, the frequency returned by hardware may be
		 * slightly different from what is provided in the frequency
		 * table, for example hardware may return 499 MHz instead of 500
		 * MHz. In such cases it is better to avoid getting into
		 * unnecessary frequency updates.
		 */
		if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
			return policy->cur;

		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
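
/*
 * Example (illustrative): code that only needs the last requested frequency,
 * without touching the hardware, can do:
 *
 *	unsigned int khz = cpufreq_quick_get(cpu);	// 0 if no policy
 *
 * and fall back to cpufreq_get(cpu) when a fresh hardware reading is needed.
 */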

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

/**
 * cpufreq_suspend() - Suspend CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (e.g. i2c, regulators) used for changing
 * the frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
				cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently registered cpufreq driver or NULL if
 * none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - Return current driver data.
 *
 * Return the private data of the currently registered cpufreq driver, or NULL
 * if no cpufreq driver has been registered.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list of notifiers that run on
 * clock rate changes (once before and once after every transition), or a list
 * of notifiers that run on cpufreq policy changes.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_register().
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_unregister().
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
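
/*
 * Example of a transition notifier (sketch, hypothetical callback): it is
 * called once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE for
 * every transition (see cpufreq_notify_transition() above):
 *
 *	static int foo_trans_cb(struct notifier_block *nb,
 *				unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("now at %u kHz\n", freqs->new);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_nb = { .notifier_call = foo_trans_cb };
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * Note that registration fails with -EBUSY while fast switching is enabled.
 */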

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	cpufreq_stats_record_transition(policy, freq);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);

/**
 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
 * @cpu: Target CPU.
 * @min_perf: Minimum (required) performance level (units of @capacity).
 * @target_perf: Target (desired) performance level (units of @capacity).
 * @capacity: Capacity of the target CPU.
 *
 * Carry out a fast performance level switch of @cpu without sleeping.
 *
 * The driver's ->adjust_perf() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select a suitable performance level equal to or above
 * @min_perf and preferably equal to or below @target_perf.
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same CPU and that it will never be called in
 * parallel with either ->target() or ->target_index() or ->fast_switch() for
 * the same CPU.
 */
void cpufreq_driver_adjust_perf(unsigned int cpu,
				unsigned long min_perf,
				unsigned long target_perf,
				unsigned long capacity)
{
	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
}

/**
 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
 *
 * Return 'true' if the ->adjust_perf callback is present for the
 * current driver or 'false' otherwise.
 */
bool cpufreq_driver_has_adjust_perf(void)
{
	return !!cpufreq_driver->adjust_perf;
}

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
2214 static int __target_index(struct cpufreq_policy *policy, int index)
2215 {
2216 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2217 	unsigned int restore_freq, intermediate_freq = 0;
2218 	unsigned int newfreq = policy->freq_table[index].frequency;
2219 	int retval = -EINVAL;
2220 	bool notify;
2222 	if (newfreq == policy->cur)
2223 		return 0;
2225 	/* Save last value to restore later on errors */
2226 	restore_freq = policy->cur;
2228 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2229 	if (notify) {
2230 		/* Handle switching to intermediate frequency */
2231 		if (cpufreq_driver->get_intermediate) {
2232 			retval = __target_intermediate(policy, &freqs, index);
2233 			if (retval)
2234 				return retval;
2236 			intermediate_freq = freqs.new;
2237 			/* Set old freq to intermediate */
2238 			if (intermediate_freq)
2239 				freqs.old = freqs.new;
2240 		}
2242 		freqs.new = newfreq;
2243 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2244 			 __func__, policy->cpu, freqs.old, freqs.new);
2246 		cpufreq_freq_transition_begin(policy, &freqs);
2247 	}
2249 	retval = cpufreq_driver->target_index(policy, index);
2250 	if (retval)
2251 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2252 		       retval);
2254 	if (notify) {
2255 		cpufreq_freq_transition_end(policy, &freqs, retval);
2257 		/*
2258 		 * Failed after setting to intermediate freq? The driver should
2259 		 * have reverted to the initial frequency and so should we.
2260 		 * Check here for intermediate_freq instead of get_intermediate,
2261 		 * in case we haven't switched to intermediate freq at all.
2262 		 */
2263 		if (unlikely(retval && intermediate_freq)) {
2264 			freqs.old = intermediate_freq;
2265 			freqs.new = restore_freq;
2266 			cpufreq_freq_transition_begin(policy, &freqs);
2267 			cpufreq_freq_transition_end(policy, &freqs, 0);
2268 		}
2269 	}
2271 	return retval;
2272 }
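/*
 * Illustrative sketch (editor's addition; all sample_* names and
 * SAMPLE_SAFE_FREQ are hypothetical): a driver opting into the
 * intermediate-frequency sequence handled above. Returning 0 from
 * ->get_intermediate() tells the core that no intermediate step is
 * needed for this transition.
 *
 *	static unsigned int sample_get_intermediate(struct cpufreq_policy *policy,
 *						    unsigned int index)
 *	{
 *		if (policy->freq_table[index].frequency == SAMPLE_SAFE_FREQ)
 *			return 0;
 *
 *		return SAMPLE_SAFE_FREQ;
 *	}
 *
 *	static int sample_target_intermediate(struct cpufreq_policy *policy,
 *					      unsigned int index)
 *	{
 *		return sample_reparent_cpu_clk_to_safe_pll();
 *	}
 */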
2274 int __cpufreq_driver_target(struct cpufreq_policy *policy,
2275 			    unsigned int target_freq,
2276 			    unsigned int relation)
2277 {
2278 	unsigned int old_target_freq = target_freq;
2280 	if (cpufreq_disabled())
2281 		return -ENODEV;
2283 	target_freq = __resolve_freq(policy, target_freq, relation);
2285 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2286 		 policy->cpu, target_freq, relation, old_target_freq);
2288 	/*
2289 	 * This might look like a redundant call, as we are checking it again
2290 	 * after finding the index. But it is left intentionally for cases where
2291 	 * exactly the same frequency is requested again, so we can save a few
2292 	 * function calls.
2293 	 */
2294 	if (target_freq == policy->cur &&
2295 	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2296 		return 0;
2298 	if (cpufreq_driver->target) {
2299 		/*
2300 		 * If the driver hasn't set up a single inefficient frequency,
2301 		 * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
2302 		 */
2303 		if (!policy->efficiencies_available)
2304 			relation &= ~CPUFREQ_RELATION_E;
2306 		return cpufreq_driver->target(policy, target_freq, relation);
2307 	}
2309 	if (!cpufreq_driver->target_index)
2310 		return -EINVAL;
2312 	return __target_index(policy, policy->cached_resolved_idx);
2313 }
2314 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2316 int cpufreq_driver_target(struct cpufreq_policy *policy,
2317 			  unsigned int target_freq,
2318 			  unsigned int relation)
2319 {
2320 	int ret;
2322 	down_write(&policy->rwsem);
2324 	ret = __cpufreq_driver_target(policy, target_freq, relation);
2326 	up_write(&policy->rwsem);
2328 	return ret;
2329 }
2330 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
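/*
 * Usage note (editor's addition): callers that do not already hold
 * policy->rwsem use the locked wrapper, e.g. to request the lowest table
 * frequency at or above 1 GHz (frequencies are in kHz throughout):
 *
 *	ret = cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_L);
 *
 * Code that already runs under policy->rwsem (typically governor
 * callbacks) must call __cpufreq_driver_target() instead to avoid
 * self-deadlock.
 */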
2332 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2333 {
2334 	return NULL;
2335 }
2337 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2338 {
2339 	int ret;
2341 	/* Don't start any governor operations if we are entering suspend */
2342 	if (cpufreq_suspended)
2343 		return 0;
2344 	/*
2345 	 * The governor might not have been initialized if an ACPI _PPC change
2346 	 * notification happened, so check it.
2347 	 */
2348 	if (!policy->governor)
2349 		return -EINVAL;
2351 	/* Platform doesn't want dynamic frequency switching? */
2352 	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2353 	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2354 		struct cpufreq_governor *gov = cpufreq_fallback_governor();
2356 		if (gov) {
2357 			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2358 				policy->governor->name, gov->name);
2359 			policy->governor = gov;
2360 		} else {
2361 			return -EINVAL;
2362 		}
2363 	}
2365 	if (!try_module_get(policy->governor->owner))
2366 		return -EINVAL;
2368 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2370 	if (policy->governor->init) {
2371 		ret = policy->governor->init(policy);
2372 		if (ret) {
2373 			module_put(policy->governor->owner);
2374 			return ret;
2375 		}
2376 	}
2378 	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2380 	return 0;
2381 }
2383 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2384 {
2385 	if (cpufreq_suspended || !policy->governor)
2386 		return;
2388 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2390 	if (policy->governor->exit)
2391 		policy->governor->exit(policy);
2393 	module_put(policy->governor->owner);
2394 }
2396 int cpufreq_start_governor(struct cpufreq_policy *policy)
2397 {
2398 	int ret;
2400 	if (cpufreq_suspended)
2401 		return 0;
2403 	if (!policy->governor)
2404 		return -EINVAL;
2406 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2408 	if (cpufreq_driver->get)
2409 		cpufreq_verify_current_freq(policy, false);
2411 	if (policy->governor->start) {
2412 		ret = policy->governor->start(policy);
2413 		if (ret)
2414 			return ret;
2415 	}
2417 	if (policy->governor->limits)
2418 		policy->governor->limits(policy);
2420 	return 0;
2421 }
2423 void cpufreq_stop_governor(struct cpufreq_policy *policy)
2424 {
2425 	if (cpufreq_suspended || !policy->governor)
2426 		return;
2428 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2430 	if (policy->governor->stop)
2431 		policy->governor->stop(policy);
2432 }
2434 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2435 {
2436 	if (cpufreq_suspended || !policy->governor)
2437 		return;
2439 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2441 	if (policy->governor->limits)
2442 		policy->governor->limits(policy);
2443 }
2445 int cpufreq_register_governor(struct cpufreq_governor *governor)
2446 {
2447 	int err;
2449 	if (!governor)
2450 		return -EINVAL;
2452 	if (cpufreq_disabled())
2453 		return -ENODEV;
2455 	mutex_lock(&cpufreq_governor_mutex);
2457 	err = -EBUSY;
2458 	if (!find_governor(governor->name)) {
2459 		err = 0;
2460 		list_add(&governor->governor_list, &cpufreq_governor_list);
2461 	}
2463 	mutex_unlock(&cpufreq_governor_mutex);
2464 	return err;
2465 }
2466 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
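/*
 * Illustrative sketch (editor's addition; the sample_* names are
 * hypothetical): a minimal governor in the mould of the built-in
 * performance governor, registered from module init. The ->limits()
 * callback runs with policy->rwsem held, hence the unlocked target call.
 *
 *	static void sample_gov_limits(struct cpufreq_policy *policy)
 *	{
 *		__cpufreq_driver_target(policy, policy->max,
 *					CPUFREQ_RELATION_H);
 *	}
 *
 *	static struct cpufreq_governor cpufreq_gov_sample = {
 *		.name	= "sample",
 *		.owner	= THIS_MODULE,
 *		.limits	= sample_gov_limits,
 *	};
 *
 *	static int __init sample_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_sample);
 *	}
 *	module_init(sample_gov_init);
 */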
2468 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2469 {
2470 	struct cpufreq_policy *policy;
2471 	unsigned long flags;
2473 	if (!governor)
2474 		return;
2476 	if (cpufreq_disabled())
2477 		return;
2479 	/* clear last_governor for all inactive policies */
2480 	read_lock_irqsave(&cpufreq_driver_lock, flags);
2481 	for_each_inactive_policy(policy) {
2482 		if (!strcmp(policy->last_governor, governor->name)) {
2483 			policy->governor = NULL;
2484 			strcpy(policy->last_governor, "\0");
2485 		}
2486 	}
2487 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2489 	mutex_lock(&cpufreq_governor_mutex);
2490 	list_del(&governor->governor_list);
2491 	mutex_unlock(&cpufreq_governor_mutex);
2492 }
2493 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2496 /*********************************************************************
2497 * POLICY INTERFACE *
2498 *********************************************************************/
2500 /**
2501 * cpufreq_get_policy - get the current cpufreq_policy
2502 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2503 *	is written
2504 * @cpu: CPU to find the policy for
2506 * Reads the current cpufreq policy.
2507 */
2508 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2509 {
2510 	struct cpufreq_policy *cpu_policy;
2511 	if (!policy)
2512 		return -EINVAL;
2514 	cpu_policy = cpufreq_cpu_get(cpu);
2515 	if (!cpu_policy)
2516 		return -EINVAL;
2518 	memcpy(policy, cpu_policy, sizeof(*policy));
2520 	cpufreq_cpu_put(cpu_policy);
2521 	return 0;
2522 }
2523 EXPORT_SYMBOL(cpufreq_get_policy);
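/*
 * Usage note (editor's addition): the function above hands out a
 * snapshot, so the caller owns the copy and needs no locking while
 * inspecting it, e.g.:
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, cpu))
 *		pr_info("cpu%u: %u - %u kHz\n", cpu, pol.min, pol.max);
 *
 * The snapshot goes stale as soon as the limits change, so re-read it
 * rather than caching it for long periods.
 */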
2525 /**
2526 * cpufreq_set_policy - Modify cpufreq policy parameters.
2527 * @policy: Policy object to modify.
2528 * @new_gov: Policy governor pointer.
2529 * @new_pol: Policy value (for drivers with built-in governors).
2531 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2532 * limits to be set for the policy, update @policy with the verified limits
2533 * values and either invoke the driver's ->setpolicy() callback (if present) or
2534 * carry out a governor update for @policy. That is, run the current governor's
2535 * ->limits() callback (if @new_gov points to the same object as the one in
2536 * @policy) or replace the governor for @policy with @new_gov.
2538 * The cpuinfo part of @policy is not updated by this function.
2539 */
2540 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2541 			      struct cpufreq_governor *new_gov,
2542 			      unsigned int new_pol)
2543 {
2544 	struct cpufreq_policy_data new_data;
2545 	struct cpufreq_governor *old_gov;
2546 	int ret;
2548 	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2549 	new_data.freq_table = policy->freq_table;
2550 	new_data.cpu = policy->cpu;
2551 	/*
2552 	 * The PM QoS framework collects all the requests from users and
2553 	 * provides us with the final aggregated value here.
2554 	 */
2555 	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2556 	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2558 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2559 		 new_data.cpu, new_data.min, new_data.max);
2561 	/*
2562 	 * Verify that the CPU speed can be set within these limits and make sure
2563 	 * that min <= max.
2564 	 */
2565 	ret = cpufreq_driver->verify(&new_data);
2566 	if (ret)
2567 		return ret;
2569 	/*
2570 	 * Resolve policy min/max to available frequencies. This ensures the
2571 	 * resolved frequencies will neither overshoot the requested maximum
2572 	 * nor undershoot the requested minimum.
2573 	 */
2574 	policy->min = new_data.min;
2575 	policy->max = new_data.max;
2576 	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
2577 	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
2578 	trace_cpu_frequency_limits(policy);
2580 	policy->cached_target_freq = UINT_MAX;
2582 	pr_debug("new min and max freqs are %u - %u kHz\n",
2583 		 policy->min, policy->max);
2585 	if (cpufreq_driver->setpolicy) {
2586 		policy->policy = new_pol;
2587 		pr_debug("setting range\n");
2588 		return cpufreq_driver->setpolicy(policy);
2589 	}
2591 	if (new_gov == policy->governor) {
2592 		pr_debug("governor limits update\n");
2593 		cpufreq_governor_limits(policy);
2594 		return 0;
2595 	}
2597 	pr_debug("governor switch\n");
2599 	/* save old, working values */
2600 	old_gov = policy->governor;
2601 	/* end old governor */
2602 	if (old_gov) {
2603 		cpufreq_stop_governor(policy);
2604 		cpufreq_exit_governor(policy);
2605 	}
2607 	/* start new governor */
2608 	policy->governor = new_gov;
2609 	ret = cpufreq_init_governor(policy);
2610 	if (!ret) {
2611 		ret = cpufreq_start_governor(policy);
2612 		if (!ret) {
2613 			pr_debug("governor change\n");
2614 			sched_cpufreq_governor_change(policy, old_gov);
2615 			return 0;
2616 		}
2617 		cpufreq_exit_governor(policy);
2618 	}
2620 	/* new governor failed, so re-start old one */
2621 	pr_debug("starting governor %s failed\n", policy->governor->name);
2622 	if (old_gov) {
2623 		policy->governor = old_gov;
2624 		if (cpufreq_init_governor(policy))
2625 			policy->governor = NULL;
2626 		else
2627 			cpufreq_start_governor(policy);
2628 	}
2630 	return ret;
2631 }
2633 /**
2634 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2635 * @cpu: CPU to re-evaluate the policy for.
2637 * Update the current frequency for the cpufreq policy of @cpu and use
2638 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2639 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2640 * for the policy in question, among other things.
2641 */
2642 void cpufreq_update_policy(unsigned int cpu)
2643 {
2644 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2646 	if (!policy)
2647 		return;
2649 	/*
2650 	 * BIOS might change freq behind our back
2651 	 * -> ask driver for current freq and notify governors about a change
2652 	 */
2653 	if (cpufreq_driver->get && has_target() &&
2654 	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2655 		goto unlock;
2657 	refresh_frequency_limits(policy);
2659 unlock:
2660 	cpufreq_cpu_release(policy);
2661 }
2662 EXPORT_SYMBOL(cpufreq_update_policy);
2664 /**
2665 * cpufreq_update_limits - Update policy limits for a given CPU.
2666 * @cpu: CPU to update the policy limits for.
2668 * Invoke the driver's ->update_limits callback if present or call
2669 * cpufreq_update_policy() for @cpu.
2670 */
2671 void cpufreq_update_limits(unsigned int cpu)
2672 {
2673 	if (cpufreq_driver->update_limits)
2674 		cpufreq_driver->update_limits(cpu);
2675 	else
2676 		cpufreq_update_policy(cpu);
2677 }
2678 EXPORT_SYMBOL_GPL(cpufreq_update_limits);
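/*
 * Illustrative sketch (editor's addition; the sample_* names are
 * hypothetical): a driver that maintains cached limits can provide
 * ->update_limits so that, for instance, a firmware notification
 * refreshes those caches without the full policy re-evaluation:
 *
 *	static void sample_update_limits(unsigned int cpu)
 *	{
 *		(re-read the platform's performance-limit registers here)
 *	}
 *
 *	static struct cpufreq_driver sample_driver = {
 *		.update_limits	= sample_update_limits,
 *		(other callbacks elided)
 *	};
 */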
2680 /*********************************************************************
2681 *                               BOOST                               *
2682 *********************************************************************/
2683 static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2684 {
2685 	int ret;
2687 	if (!policy->freq_table)
2688 		return -ENXIO;
2690 	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2691 	if (ret) {
2692 		pr_err("%s: Policy frequency update failed\n", __func__);
2693 		return ret;
2694 	}
2696 	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2697 	if (ret < 0)
2698 		return ret;
2700 	return 0;
2701 }
2703 int cpufreq_boost_trigger_state(int state)
2704 {
2705 	struct cpufreq_policy *policy;
2706 	unsigned long flags;
2707 	int ret = 0;
2709 	if (cpufreq_driver->boost_enabled == state)
2710 		return 0;
2712 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2713 	cpufreq_driver->boost_enabled = state;
2714 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2716 	cpus_read_lock();
2717 	for_each_active_policy(policy) {
2718 		ret = cpufreq_driver->set_boost(policy, state);
2719 		if (ret)
2720 			goto err_reset_state;
2721 	}
2722 	cpus_read_unlock();
2724 	return 0;
2726 err_reset_state:
2727 	cpus_read_unlock();
2729 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2730 	cpufreq_driver->boost_enabled = !state;
2731 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2733 	pr_err("%s: Cannot %s BOOST\n",
2734 	       __func__, state ? "enable" : "disable");
2736 	return ret;
2737 }
2739 static bool cpufreq_boost_supported(void)
2740 {
2741 	return cpufreq_driver->set_boost;
2742 }
2744 static int create_boost_sysfs_file(void)
2745 {
2746 	int ret;
2748 	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2749 	if (ret)
2750 		pr_err("%s: cannot register global BOOST sysfs file\n",
2751 		       __func__);
2753 	return ret;
2754 }
2756 static void remove_boost_sysfs_file(void)
2757 {
2758 	if (cpufreq_boost_supported())
2759 		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2760 }
2762 int cpufreq_enable_boost_support(void)
2763 {
2764 	if (!cpufreq_driver)
2765 		return -EINVAL;
2767 	if (cpufreq_boost_supported())
2768 		return 0;
2770 	cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2772 	/* This will get removed on driver unregister */
2773 	return create_boost_sysfs_file();
2774 }
2775 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
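/*
 * Usage note (editor's addition): drivers whose frequency tables carry
 * CPUFREQ_BOOST_FREQ entries typically call this from their ->init()
 * path, along the lines of (error handling elided):
 *
 *	if (policy_has_boost_freq(policy))
 *		cpufreq_enable_boost_support();
 *
 * With no native ->set_boost, the software fallback installed above
 * simply re-evaluates the frequency table with boost entries included
 * or excluded.
 */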
2777 int cpufreq_boost_enabled(void)
2778 {
2779 	return cpufreq_driver->boost_enabled;
2780 }
2781 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2783 /*********************************************************************
2784 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2785 *********************************************************************/
2786 static enum cpuhp_state hp_online;
2788 static int cpuhp_cpufreq_online(unsigned int cpu)
2789 {
2790 	cpufreq_online(cpu);
2792 	return 0;
2793 }
2795 static int cpuhp_cpufreq_offline(unsigned int cpu)
2796 {
2797 	cpufreq_offline(cpu);
2799 	return 0;
2800 }
2802 /**
2803 * cpufreq_register_driver - register a CPU Frequency driver
2804 * @driver_data: A struct cpufreq_driver containing the values
2805 * submitted by the CPU Frequency driver.
2807 * Registers a CPU Frequency driver to this core code. This code
2808 * returns zero on success, -EEXIST when another driver got here first
2809 * (and isn't unregistered in the meantime).
2810 */
2812 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2813 {
2814 	unsigned long flags;
2815 	int ret;
2817 	if (cpufreq_disabled())
2818 		return -ENODEV;
2820 	/*
2821 	 * The cpufreq core depends heavily on the availability of device
2822 	 * structure, make sure they are available before proceeding further.
2823 	 */
2824 	if (!get_cpu_device(0))
2825 		return -EPROBE_DEFER;
2827 	if (!driver_data || !driver_data->verify || !driver_data->init ||
2828 	    !(driver_data->setpolicy || driver_data->target_index ||
2829 		    driver_data->target) ||
2830 	     (driver_data->setpolicy && (driver_data->target_index ||
2831 		    driver_data->target)) ||
2832 	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2833 	     (!driver_data->online != !driver_data->offline))
2834 		return -EINVAL;
2836 	pr_debug("trying to register driver %s\n", driver_data->name);
2838 	/* Protect against concurrent CPU online/offline. */
2839 	cpus_read_lock();
2841 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2842 	if (cpufreq_driver) {
2843 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2844 		ret = -EEXIST;
2845 		goto out;
2846 	}
2847 	cpufreq_driver = driver_data;
2848 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2850 	/*
2851 	 * Mark support for the scheduler's frequency invariance engine for
2852 	 * drivers that implement target(), target_index() or fast_switch().
2853 	 */
2854 	if (!cpufreq_driver->setpolicy) {
2855 		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2856 		pr_debug("supports frequency invariance");
2857 	}
2859 	if (driver_data->setpolicy)
2860 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2862 	if (cpufreq_boost_supported()) {
2863 		ret = create_boost_sysfs_file();
2864 		if (ret)
2865 			goto err_null_driver;
2866 	}
2868 	ret = subsys_interface_register(&cpufreq_interface);
2869 	if (ret)
2870 		goto err_boost_unreg;
2872 	if (unlikely(list_empty(&cpufreq_policy_list))) {
2873 		/* if all ->init() calls failed, unregister */
2874 		ret = -ENODEV;
2875 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2876 			 driver_data->name);
2877 		goto err_if_unreg;
2878 	}
2880 	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2881 						   "cpufreq:online",
2882 						   cpuhp_cpufreq_online,
2883 						   cpuhp_cpufreq_offline);
2884 	if (ret < 0)
2885 		goto err_if_unreg;
2886 	hp_online = ret;
2887 	ret = 0;
2889 	pr_debug("driver %s up and running\n", driver_data->name);
2890 	goto out;
2892 err_if_unreg:
2893 	subsys_interface_unregister(&cpufreq_interface);
2894 err_boost_unreg:
2895 	remove_boost_sysfs_file();
2896 err_null_driver:
2897 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2898 	cpufreq_driver = NULL;
2899 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2900 out:
2901 	cpus_read_unlock();
2902 	return ret;
2903 }
2904 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
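/*
 * Illustrative sketch (editor's addition; the sample_* callbacks are
 * hypothetical, while the generic helpers and the flag are real): about
 * the smallest descriptor that passes the validation above.
 *
 *	static struct cpufreq_driver sample_driver = {
 *		.name		= "sample",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= sample_target_index,
 *		.get		= cpufreq_generic_get,
 *		.init		= sample_cpu_init,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	ret = cpufreq_register_driver(&sample_driver);
 *
 * Note the invariants checked above: exactly one of ->setpolicy or
 * ->target()/->target_index(), ->get_intermediate only together with
 * ->target_intermediate, and ->online only together with ->offline.
 */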
2906 /**
2907 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2909 * Unregister the current CPUFreq driver. Only call this if you have
2910 * the right to do so, i.e. if you have succeeded in initialising before!
2911 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2912 * currently not initialised.
2913 */
2914 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2915 {
2916 	unsigned long flags;
2918 	if (!cpufreq_driver || (driver != cpufreq_driver))
2919 		return -EINVAL;
2921 	pr_debug("unregistering driver %s\n", driver->name);
2923 	/* Protect against concurrent cpu hotplug */
2924 	cpus_read_lock();
2925 	subsys_interface_unregister(&cpufreq_interface);
2926 	remove_boost_sysfs_file();
2927 	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
2928 	cpuhp_remove_state_nocalls_cpuslocked(hp_online);
2930 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2932 	cpufreq_driver = NULL;
2934 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2936 	cpus_read_unlock();
2937 	return 0;
2938 }
2939 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
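/*
 * Usage note (editor's addition; sample names hypothetical):
 * registration and unregistration pair up in module init/exit, with the
 * same descriptor passed to both:
 *
 *	static int __init sample_cpufreq_init(void)
 *	{
 *		return cpufreq_register_driver(&sample_driver);
 *	}
 *	module_init(sample_cpufreq_init);
 *
 *	static void __exit sample_cpufreq_exit(void)
 *	{
 *		cpufreq_unregister_driver(&sample_driver);
 *	}
 *	module_exit(sample_cpufreq_exit);
 */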
2941 static int __init cpufreq_core_init(void)
2942 {
2943 	struct cpufreq_governor *gov = cpufreq_default_governor();
2945 	if (cpufreq_disabled())
2946 		return -ENODEV;
2948 	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2949 	BUG_ON(!cpufreq_global_kobject);
2951 	if (!strlen(default_governor))
2952 		strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
2954 	return 0;
2955 }
2956 module_param(off, int, 0444);
2957 module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
2958 core_initcall(cpufreq_core_init);