1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/drivers/cpufreq/cpufreq.c
5 * Copyright (C) 2001 Russell King
6 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
9 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
10 * Added handling for CPU hotplug
11 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
12 * Fix handling for CPU hotplug -- affected CPUs
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/cpu.h>
18 #include <linux/cpufreq.h>
19 #include <linux/cpu_cooling.h>
20 #include <linux/delay.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/pm_qos.h>
27 #include <linux/slab.h>
28 #include <linux/suspend.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
33 static LIST_HEAD(cpufreq_policy_list);
35 /* Macros to iterate over CPU policies */
36 #define for_each_suitable_policy(__policy, __active) \
37 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
38 if ((__active) == !policy_is_inactive(__policy))
40 #define for_each_active_policy(__policy) \
41 for_each_suitable_policy(__policy, true)
42 #define for_each_inactive_policy(__policy) \
43 for_each_suitable_policy(__policy, false)
45 #define for_each_policy(__policy) \
46 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
48 /* Iterate over governors */
49 static LIST_HEAD(cpufreq_governor_list);
50 #define for_each_governor(__governor) \
51 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
54 * The "cpufreq driver" - the arch- or hardware-dependent low
55 * level driver of CPUFreq support, and its spinlock. This lock
56 * also protects the cpufreq_cpu_data array.
58 static struct cpufreq_driver *cpufreq_driver;
59 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
60 static DEFINE_RWLOCK(cpufreq_driver_lock);
62 /* Flag to suspend/resume CPUFreq governors */
63 static bool cpufreq_suspended;
65 static inline bool has_target(void)
67 return cpufreq_driver->target_index || cpufreq_driver->target;
70 /* internal prototypes */
71 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
72 static int cpufreq_init_governor(struct cpufreq_policy *policy);
73 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
74 static int cpufreq_start_governor(struct cpufreq_policy *policy);
75 static void cpufreq_stop_governor(struct cpufreq_policy *policy);
76 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
77 static int cpufreq_set_policy(struct cpufreq_policy *policy,
78 struct cpufreq_governor *new_gov,
79 unsigned int new_pol);
82 * Two notifier lists: the "policy" list is involved in the
83 * validation process for a new CPU frequency policy; the
84 * "transition" list for kernel code that needs to handle
85 * changes to devices when the CPU clock speed changes.
86 * The mutex locks both lists.
88 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
89 SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
91 static int off __read_mostly;
92 static int cpufreq_disabled(void)
96 void disable_cpufreq(void)
100 static DEFINE_MUTEX(cpufreq_governor_mutex);
102 bool have_governor_per_policy(void)
104 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
106 EXPORT_SYMBOL_GPL(have_governor_per_policy);
108 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
110 if (have_governor_per_policy())
111 return &policy->kobj;
113 return cpufreq_global_kobject;
115 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
117 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
123 cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
125 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
128 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
129 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
130 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
132 idle_time = cur_wall_time - busy_time;
134 *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
136 return div_u64(idle_time, NSEC_PER_USEC);
139 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
141 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
143 if (idle_time == -1ULL)
144 return get_cpu_idle_time_jiffy(cpu, wall);
146 idle_time += get_cpu_iowait_time_us(cpu, wall);
150 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
152 __weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
153 unsigned long max_freq)
156 EXPORT_SYMBOL_GPL(arch_set_freq_scale);
159 * This is a generic cpufreq init() routine which can be used by cpufreq
160 * drivers of SMP systems. It will do following:
161 * - validate & show freq table passed
162 * - set policies transition latency
163 * - policy->cpus with all possible CPUs
165 void cpufreq_generic_init(struct cpufreq_policy *policy,
166 struct cpufreq_frequency_table *table,
167 unsigned int transition_latency)
169 policy->freq_table = table;
170 policy->cpuinfo.transition_latency = transition_latency;
173 * The driver only supports the SMP configuration where all processors
174 * share the clock and voltage and clock.
176 cpumask_setall(policy->cpus);
178 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
180 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
182 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
184 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
186 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
188 unsigned int cpufreq_generic_get(unsigned int cpu)
190 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
192 if (!policy || IS_ERR(policy->clk)) {
193 pr_err("%s: No %s associated to cpu: %d\n",
194 __func__, policy ? "clk" : "policy", cpu);
198 return clk_get_rate(policy->clk) / 1000;
200 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
203 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
204 * @cpu: CPU to find the policy for.
206 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
207 * the kobject reference counter of that policy. Return a valid policy on
208 * success or NULL on failure.
210 * The policy returned by this function has to be released with the help of
211 * cpufreq_cpu_put() to balance its kobject reference counter properly.
213 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
215 struct cpufreq_policy *policy = NULL;
218 if (WARN_ON(cpu >= nr_cpu_ids))
221 /* get the cpufreq driver */
222 read_lock_irqsave(&cpufreq_driver_lock, flags);
224 if (cpufreq_driver) {
226 policy = cpufreq_cpu_get_raw(cpu);
228 kobject_get(&policy->kobj);
231 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
235 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
238 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
239 * @policy: cpufreq policy returned by cpufreq_cpu_get().
241 void cpufreq_cpu_put(struct cpufreq_policy *policy)
243 kobject_put(&policy->kobj);
245 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
248 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
249 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
251 void cpufreq_cpu_release(struct cpufreq_policy *policy)
253 if (WARN_ON(!policy))
256 lockdep_assert_held(&policy->rwsem);
258 up_write(&policy->rwsem);
260 cpufreq_cpu_put(policy);
264 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
265 * @cpu: CPU to find the policy for.
267 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
268 * if the policy returned by it is not NULL, acquire its rwsem for writing.
269 * Return the policy if it is active or release it and return NULL otherwise.
271 * The policy returned by this function has to be released with the help of
272 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
275 struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
277 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
282 down_write(&policy->rwsem);
284 if (policy_is_inactive(policy)) {
285 cpufreq_cpu_release(policy);
292 /*********************************************************************
293 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
294 *********************************************************************/
297 * adjust_jiffies - adjust the system "loops_per_jiffy"
299 * This function alters the system "loops_per_jiffy" for the clock
300 * speed change. Note that loops_per_jiffy cannot be updated on SMP
301 * systems as each CPU might be scaled differently. So, use the arch
302 * per-CPU loops_per_jiffy value wherever possible.
304 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
307 static unsigned long l_p_j_ref;
308 static unsigned int l_p_j_ref_freq;
310 if (ci->flags & CPUFREQ_CONST_LOOPS)
313 if (!l_p_j_ref_freq) {
314 l_p_j_ref = loops_per_jiffy;
315 l_p_j_ref_freq = ci->old;
316 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
317 l_p_j_ref, l_p_j_ref_freq);
319 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
320 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
322 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
323 loops_per_jiffy, ci->new);
329 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
330 * @policy: cpufreq policy to enable fast frequency switching for.
331 * @freqs: contain details of the frequency update.
332 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
334 * This function calls the transition notifiers and the "adjust_jiffies"
335 * function. It is called twice on all CPU frequency changes that have
338 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
339 struct cpufreq_freqs *freqs,
344 BUG_ON(irqs_disabled());
346 if (cpufreq_disabled())
349 freqs->policy = policy;
350 freqs->flags = cpufreq_driver->flags;
351 pr_debug("notification %u of frequency transition to %u kHz\n",
355 case CPUFREQ_PRECHANGE:
357 * Detect if the driver reported a value as "old frequency"
358 * which is not equal to what the cpufreq core thinks is
361 if (policy->cur && policy->cur != freqs->old) {
362 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
363 freqs->old, policy->cur);
364 freqs->old = policy->cur;
367 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
368 CPUFREQ_PRECHANGE, freqs);
370 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
373 case CPUFREQ_POSTCHANGE:
374 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
375 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
376 cpumask_pr_args(policy->cpus));
378 for_each_cpu(cpu, policy->cpus)
379 trace_cpu_frequency(freqs->new, cpu);
381 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
382 CPUFREQ_POSTCHANGE, freqs);
384 cpufreq_stats_record_transition(policy, freqs->new);
385 policy->cur = freqs->new;
389 /* Do post notifications when there are chances that transition has failed */
390 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
391 struct cpufreq_freqs *freqs, int transition_failed)
393 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
394 if (!transition_failed)
397 swap(freqs->old, freqs->new);
398 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
399 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
402 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
403 struct cpufreq_freqs *freqs)
407 * Catch double invocations of _begin() which lead to self-deadlock.
408 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
409 * doesn't invoke _begin() on their behalf, and hence the chances of
410 * double invocations are very low. Moreover, there are scenarios
411 * where these checks can emit false-positive warnings in these
412 * drivers; so we avoid that by skipping them altogether.
414 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
415 && current == policy->transition_task);
418 wait_event(policy->transition_wait, !policy->transition_ongoing);
420 spin_lock(&policy->transition_lock);
422 if (unlikely(policy->transition_ongoing)) {
423 spin_unlock(&policy->transition_lock);
427 policy->transition_ongoing = true;
428 policy->transition_task = current;
430 spin_unlock(&policy->transition_lock);
432 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
434 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
436 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
437 struct cpufreq_freqs *freqs, int transition_failed)
439 if (WARN_ON(!policy->transition_ongoing))
442 cpufreq_notify_post_transition(policy, freqs, transition_failed);
444 policy->transition_ongoing = false;
445 policy->transition_task = NULL;
447 wake_up(&policy->transition_wait);
449 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
452 * Fast frequency switching status count. Positive means "enabled", negative
453 * means "disabled" and 0 means "not decided yet".
455 static int cpufreq_fast_switch_count;
456 static DEFINE_MUTEX(cpufreq_fast_switch_lock);
458 static void cpufreq_list_transition_notifiers(void)
460 struct notifier_block *nb;
462 pr_info("Registered transition notifiers:\n");
464 mutex_lock(&cpufreq_transition_notifier_list.mutex);
466 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
467 pr_info("%pS\n", nb->notifier_call);
469 mutex_unlock(&cpufreq_transition_notifier_list.mutex);
473 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
474 * @policy: cpufreq policy to enable fast frequency switching for.
476 * Try to enable fast frequency switching for @policy.
478 * The attempt will fail if there is at least one transition notifier registered
479 * at this point, as fast frequency switching is quite fundamentally at odds
480 * with transition notifiers. Thus if successful, it will make registration of
481 * transition notifiers fail going forward.
483 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
485 lockdep_assert_held(&policy->rwsem);
487 if (!policy->fast_switch_possible)
490 mutex_lock(&cpufreq_fast_switch_lock);
491 if (cpufreq_fast_switch_count >= 0) {
492 cpufreq_fast_switch_count++;
493 policy->fast_switch_enabled = true;
495 pr_warn("CPU%u: Fast frequency switching not enabled\n",
497 cpufreq_list_transition_notifiers();
499 mutex_unlock(&cpufreq_fast_switch_lock);
501 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
504 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
505 * @policy: cpufreq policy to disable fast frequency switching for.
507 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
509 mutex_lock(&cpufreq_fast_switch_lock);
510 if (policy->fast_switch_enabled) {
511 policy->fast_switch_enabled = false;
512 if (!WARN_ON(cpufreq_fast_switch_count <= 0))
513 cpufreq_fast_switch_count--;
515 mutex_unlock(&cpufreq_fast_switch_lock);
517 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
520 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
522 * @target_freq: target frequency to resolve.
524 * The target to driver frequency mapping is cached in the policy.
526 * Return: Lowest driver-supported frequency greater than or equal to the
527 * given target_freq, subject to policy (min/max) and driver limitations.
529 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
530 unsigned int target_freq)
532 target_freq = clamp_val(target_freq, policy->min, policy->max);
533 policy->cached_target_freq = target_freq;
535 if (cpufreq_driver->target_index) {
538 idx = cpufreq_frequency_table_target(policy, target_freq,
540 policy->cached_resolved_idx = idx;
541 return policy->freq_table[idx].frequency;
544 if (cpufreq_driver->resolve_freq)
545 return cpufreq_driver->resolve_freq(policy, target_freq);
549 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
551 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
553 unsigned int latency;
555 if (policy->transition_delay_us)
556 return policy->transition_delay_us;
558 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
561 * For platforms that can change the frequency very fast (< 10
562 * us), the above formula gives a decent transition delay. But
563 * for platforms where transition_latency is in milliseconds, it
564 * ends up giving unrealistic values.
566 * Cap the default transition delay to 10 ms, which seems to be
567 * a reasonable amount of time after which we should reevaluate
570 return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
573 return LATENCY_MULTIPLIER;
575 EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
577 /*********************************************************************
579 *********************************************************************/
580 static ssize_t show_boost(struct kobject *kobj,
581 struct kobj_attribute *attr, char *buf)
583 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
586 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
587 const char *buf, size_t count)
591 ret = sscanf(buf, "%d", &enable);
592 if (ret != 1 || enable < 0 || enable > 1)
595 if (cpufreq_boost_trigger_state(enable)) {
596 pr_err("%s: Cannot %s BOOST!\n",
597 __func__, enable ? "enable" : "disable");
601 pr_debug("%s: cpufreq BOOST %s\n",
602 __func__, enable ? "enabled" : "disabled");
606 define_one_global_rw(boost);
608 static struct cpufreq_governor *find_governor(const char *str_governor)
610 struct cpufreq_governor *t;
613 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
619 static struct cpufreq_governor *get_governor(const char *str_governor)
621 struct cpufreq_governor *t;
623 mutex_lock(&cpufreq_governor_mutex);
624 t = find_governor(str_governor);
628 if (!try_module_get(t->owner))
632 mutex_unlock(&cpufreq_governor_mutex);
637 static unsigned int cpufreq_parse_policy(char *str_governor)
639 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
640 return CPUFREQ_POLICY_PERFORMANCE;
642 if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
643 return CPUFREQ_POLICY_POWERSAVE;
645 return CPUFREQ_POLICY_UNKNOWN;
649 * cpufreq_parse_governor - parse a governor string only for has_target()
650 * @str_governor: Governor name.
652 static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
654 struct cpufreq_governor *t;
656 t = get_governor(str_governor);
660 if (request_module("cpufreq_%s", str_governor))
663 return get_governor(str_governor);
667 * cpufreq_per_cpu_attr_read() / show_##file_name() -
668 * print out cpufreq information
670 * Write out information from cpufreq_driver->policy[cpu]; object must be
674 #define show_one(file_name, object) \
675 static ssize_t show_##file_name \
676 (struct cpufreq_policy *policy, char *buf) \
678 return sprintf(buf, "%u\n", policy->object); \
681 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
682 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
683 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
684 show_one(scaling_min_freq, min);
685 show_one(scaling_max_freq, max);
687 __weak unsigned int arch_freq_get_on_cpu(int cpu)
692 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
697 freq = arch_freq_get_on_cpu(policy->cpu);
699 ret = sprintf(buf, "%u\n", freq);
700 else if (cpufreq_driver && cpufreq_driver->setpolicy &&
702 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
704 ret = sprintf(buf, "%u\n", policy->cur);
709 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
711 #define store_one(file_name, object) \
712 static ssize_t store_##file_name \
713 (struct cpufreq_policy *policy, const char *buf, size_t count) \
718 ret = sscanf(buf, "%lu", &val); \
722 ret = freq_qos_update_request(policy->object##_freq_req, val);\
723 return ret >= 0 ? count : ret; \
726 store_one(scaling_min_freq, min);
727 store_one(scaling_max_freq, max);
730 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
732 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
735 unsigned int cur_freq = __cpufreq_get(policy);
738 return sprintf(buf, "%u\n", cur_freq);
740 return sprintf(buf, "<unknown>\n");
744 * show_scaling_governor - show the current policy for the specified CPU
746 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
748 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
749 return sprintf(buf, "powersave\n");
750 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
751 return sprintf(buf, "performance\n");
752 else if (policy->governor)
753 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
754 policy->governor->name);
759 * store_scaling_governor - store policy for the specified CPU
761 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
762 const char *buf, size_t count)
764 char str_governor[16];
767 ret = sscanf(buf, "%15s", str_governor);
771 if (cpufreq_driver->setpolicy) {
772 unsigned int new_pol;
774 new_pol = cpufreq_parse_policy(str_governor);
778 ret = cpufreq_set_policy(policy, NULL, new_pol);
780 struct cpufreq_governor *new_gov;
782 new_gov = cpufreq_parse_governor(str_governor);
786 ret = cpufreq_set_policy(policy, new_gov,
787 CPUFREQ_POLICY_UNKNOWN);
789 module_put(new_gov->owner);
792 return ret ? ret : count;
796 * show_scaling_driver - show the cpufreq driver currently loaded
798 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
800 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
804 * show_scaling_available_governors - show the available CPUfreq governors
806 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
810 struct cpufreq_governor *t;
813 i += sprintf(buf, "performance powersave");
817 mutex_lock(&cpufreq_governor_mutex);
818 for_each_governor(t) {
819 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
820 - (CPUFREQ_NAME_LEN + 2)))
822 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
824 mutex_unlock(&cpufreq_governor_mutex);
826 i += sprintf(&buf[i], "\n");
830 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
835 for_each_cpu(cpu, mask) {
837 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
838 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
839 if (i >= (PAGE_SIZE - 5))
842 i += sprintf(&buf[i], "\n");
845 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
848 * show_related_cpus - show the CPUs affected by each transition even if
849 * hw coordination is in use
851 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
853 return cpufreq_show_cpus(policy->related_cpus, buf);
857 * show_affected_cpus - show the CPUs affected by each transition
859 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
861 return cpufreq_show_cpus(policy->cpus, buf);
864 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
865 const char *buf, size_t count)
867 unsigned int freq = 0;
870 if (!policy->governor || !policy->governor->store_setspeed)
873 ret = sscanf(buf, "%u", &freq);
877 policy->governor->store_setspeed(policy, freq);
882 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
884 if (!policy->governor || !policy->governor->show_setspeed)
885 return sprintf(buf, "<unsupported>\n");
887 return policy->governor->show_setspeed(policy, buf);
891 * show_bios_limit - show the current cpufreq HW/BIOS limitation
893 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
897 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
899 return sprintf(buf, "%u\n", limit);
900 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
903 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
904 cpufreq_freq_attr_ro(cpuinfo_min_freq);
905 cpufreq_freq_attr_ro(cpuinfo_max_freq);
906 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
907 cpufreq_freq_attr_ro(scaling_available_governors);
908 cpufreq_freq_attr_ro(scaling_driver);
909 cpufreq_freq_attr_ro(scaling_cur_freq);
910 cpufreq_freq_attr_ro(bios_limit);
911 cpufreq_freq_attr_ro(related_cpus);
912 cpufreq_freq_attr_ro(affected_cpus);
913 cpufreq_freq_attr_rw(scaling_min_freq);
914 cpufreq_freq_attr_rw(scaling_max_freq);
915 cpufreq_freq_attr_rw(scaling_governor);
916 cpufreq_freq_attr_rw(scaling_setspeed);
918 static struct attribute *default_attrs[] = {
919 &cpuinfo_min_freq.attr,
920 &cpuinfo_max_freq.attr,
921 &cpuinfo_transition_latency.attr,
922 &scaling_min_freq.attr,
923 &scaling_max_freq.attr,
926 &scaling_governor.attr,
927 &scaling_driver.attr,
928 &scaling_available_governors.attr,
929 &scaling_setspeed.attr,
933 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
934 #define to_attr(a) container_of(a, struct freq_attr, attr)
936 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
938 struct cpufreq_policy *policy = to_policy(kobj);
939 struct freq_attr *fattr = to_attr(attr);
945 down_read(&policy->rwsem);
946 ret = fattr->show(policy, buf);
947 up_read(&policy->rwsem);
952 static ssize_t store(struct kobject *kobj, struct attribute *attr,
953 const char *buf, size_t count)
955 struct cpufreq_policy *policy = to_policy(kobj);
956 struct freq_attr *fattr = to_attr(attr);
957 ssize_t ret = -EINVAL;
963 * cpus_read_trylock() is used here to work around a circular lock
964 * dependency problem with respect to the cpufreq_register_driver().
966 if (!cpus_read_trylock())
969 if (cpu_online(policy->cpu)) {
970 down_write(&policy->rwsem);
971 ret = fattr->store(policy, buf, count);
972 up_write(&policy->rwsem);
980 static void cpufreq_sysfs_release(struct kobject *kobj)
982 struct cpufreq_policy *policy = to_policy(kobj);
983 pr_debug("last reference is dropped\n");
984 complete(&policy->kobj_unregister);
987 static const struct sysfs_ops sysfs_ops = {
992 static struct kobj_type ktype_cpufreq = {
993 .sysfs_ops = &sysfs_ops,
994 .default_attrs = default_attrs,
995 .release = cpufreq_sysfs_release,
998 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1004 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1007 dev_dbg(dev, "%s: Adding symlink\n", __func__);
1008 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1009 dev_err(dev, "cpufreq symlink creation failed\n");
1012 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
1015 dev_dbg(dev, "%s: Removing symlink\n", __func__);
1016 sysfs_remove_link(&dev->kobj, "cpufreq");
1019 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1021 struct freq_attr **drv_attr;
1024 /* set up files for this cpu device */
1025 drv_attr = cpufreq_driver->attr;
1026 while (drv_attr && *drv_attr) {
1027 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1032 if (cpufreq_driver->get) {
1033 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1038 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1042 if (cpufreq_driver->bios_limit) {
1043 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1051 __weak struct cpufreq_governor *cpufreq_default_governor(void)
1056 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1058 struct cpufreq_governor *def_gov = cpufreq_default_governor();
1059 struct cpufreq_governor *gov = NULL;
1060 unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1064 /* Update policy governor to the one used before hotplug. */
1065 gov = get_governor(policy->last_governor);
1067 pr_debug("Restoring governor %s for cpu %d\n",
1068 policy->governor->name, policy->cpu);
1069 } else if (def_gov) {
1071 __module_get(gov->owner);
1076 /* Use the default policy if there is no last_policy. */
1077 if (policy->last_policy) {
1078 pol = policy->last_policy;
1079 } else if (def_gov) {
1080 pol = cpufreq_parse_policy(def_gov->name);
1082 * In case the default governor is neiter "performance"
1083 * nor "powersave", fall back to the initial policy
1084 * value set by the driver.
1086 if (pol == CPUFREQ_POLICY_UNKNOWN)
1087 pol = policy->policy;
1089 if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1090 pol != CPUFREQ_POLICY_POWERSAVE)
1094 ret = cpufreq_set_policy(policy, gov, pol);
1096 module_put(gov->owner);
1101 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1105 /* Has this CPU been taken care of already? */
1106 if (cpumask_test_cpu(cpu, policy->cpus))
1109 down_write(&policy->rwsem);
1111 cpufreq_stop_governor(policy);
1113 cpumask_set_cpu(cpu, policy->cpus);
1116 ret = cpufreq_start_governor(policy);
1118 pr_err("%s: Failed to start governor\n", __func__);
1120 up_write(&policy->rwsem);
1124 void refresh_frequency_limits(struct cpufreq_policy *policy)
1126 if (!policy_is_inactive(policy)) {
1127 pr_debug("updating policy for CPU %u\n", policy->cpu);
1129 cpufreq_set_policy(policy, policy->governor, policy->policy);
1132 EXPORT_SYMBOL(refresh_frequency_limits);
1134 static void handle_update(struct work_struct *work)
1136 struct cpufreq_policy *policy =
1137 container_of(work, struct cpufreq_policy, update);
1139 pr_debug("handle_update for cpu %u called\n", policy->cpu);
1140 down_write(&policy->rwsem);
1141 refresh_frequency_limits(policy);
1142 up_write(&policy->rwsem);
1145 static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1148 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1150 schedule_work(&policy->update);
1154 static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1157 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1159 schedule_work(&policy->update);
1163 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1165 struct kobject *kobj;
1166 struct completion *cmp;
1168 down_write(&policy->rwsem);
1169 cpufreq_stats_free_table(policy);
1170 kobj = &policy->kobj;
1171 cmp = &policy->kobj_unregister;
1172 up_write(&policy->rwsem);
1176 * We need to make sure that the underlying kobj is
1177 * actually not referenced anymore by anybody before we
1178 * proceed with unloading.
1180 pr_debug("waiting for dropping of refcount\n");
1181 wait_for_completion(cmp);
1182 pr_debug("wait complete\n");
1185 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1187 struct cpufreq_policy *policy;
1188 struct device *dev = get_cpu_device(cpu);
1194 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1198 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1199 goto err_free_policy;
1201 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1202 goto err_free_cpumask;
1204 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1205 goto err_free_rcpumask;
1207 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1208 cpufreq_global_kobject, "policy%u", cpu);
1210 dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1212 * The entire policy object will be freed below, but the extra
1213 * memory allocated for the kobject name needs to be freed by
1214 * releasing the kobject.
1216 kobject_put(&policy->kobj);
1217 goto err_free_real_cpus;
1220 freq_constraints_init(&policy->constraints);
1222 policy->nb_min.notifier_call = cpufreq_notifier_min;
1223 policy->nb_max.notifier_call = cpufreq_notifier_max;
1225 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1228 dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
1229 ret, cpumask_pr_args(policy->cpus));
1230 goto err_kobj_remove;
1233 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1236 dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
1237 ret, cpumask_pr_args(policy->cpus));
1238 goto err_min_qos_notifier;
1241 INIT_LIST_HEAD(&policy->policy_list);
1242 init_rwsem(&policy->rwsem);
1243 spin_lock_init(&policy->transition_lock);
1244 init_waitqueue_head(&policy->transition_wait);
1245 init_completion(&policy->kobj_unregister);
1246 INIT_WORK(&policy->update, handle_update);
1251 err_min_qos_notifier:
1252 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1255 cpufreq_policy_put_kobj(policy);
1257 free_cpumask_var(policy->real_cpus);
1259 free_cpumask_var(policy->related_cpus);
1261 free_cpumask_var(policy->cpus);
1268 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1270 unsigned long flags;
1273 /* Remove policy from list */
1274 write_lock_irqsave(&cpufreq_driver_lock, flags);
1275 list_del(&policy->policy_list);
1277 for_each_cpu(cpu, policy->related_cpus)
1278 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1279 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1281 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1283 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1286 /* Cancel any pending policy->update work before freeing the policy. */
1287 cancel_work_sync(&policy->update);
1289 if (policy->max_freq_req) {
1291 * CPUFREQ_CREATE_POLICY notification is sent only after
1292 * successfully adding max_freq_req request.
1294 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1295 CPUFREQ_REMOVE_POLICY, policy);
1296 freq_qos_remove_request(policy->max_freq_req);
1299 freq_qos_remove_request(policy->min_freq_req);
1300 kfree(policy->min_freq_req);
1302 cpufreq_policy_put_kobj(policy);
1303 free_cpumask_var(policy->real_cpus);
1304 free_cpumask_var(policy->related_cpus);
1305 free_cpumask_var(policy->cpus);
1309 static int cpufreq_online(unsigned int cpu)
1311 struct cpufreq_policy *policy;
1313 unsigned long flags;
1317 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1319 /* Check if this CPU already has a policy to manage it */
1320 policy = per_cpu(cpufreq_cpu_data, cpu);
1322 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1323 if (!policy_is_inactive(policy))
1324 return cpufreq_add_policy_cpu(policy, cpu);
1326 /* This is the only online CPU for the policy. Start over. */
1328 down_write(&policy->rwsem);
1330 policy->governor = NULL;
1331 up_write(&policy->rwsem);
1334 policy = cpufreq_policy_alloc(cpu);
1339 if (!new_policy && cpufreq_driver->online) {
1340 ret = cpufreq_driver->online(policy);
1342 pr_debug("%s: %d: initialization failed\n", __func__,
1344 goto out_exit_policy;
1347 /* Recover policy->cpus using related_cpus */
1348 cpumask_copy(policy->cpus, policy->related_cpus);
1350 cpumask_copy(policy->cpus, cpumask_of(cpu));
1353 * Call driver. From then on the cpufreq must be able
1354 * to accept all calls to ->verify and ->setpolicy for this CPU.
1356 ret = cpufreq_driver->init(policy);
1358 pr_debug("%s: %d: initialization failed\n", __func__,
1360 goto out_free_policy;
1364 * The initialization has succeeded and the policy is online.
1365 * If there is a problem with its frequency table, take it
1366 * offline and drop it.
1368 ret = cpufreq_table_validate_and_sort(policy);
1370 goto out_offline_policy;
1372 /* related_cpus should at least include policy->cpus. */
1373 cpumask_copy(policy->related_cpus, policy->cpus);
1376 down_write(&policy->rwsem);
1378 * affected cpus must always be the one, which are online. We aren't
1379 * managing offline cpus here.
1381 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1384 for_each_cpu(j, policy->related_cpus) {
1385 per_cpu(cpufreq_cpu_data, j) = policy;
1386 add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1389 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1391 if (!policy->min_freq_req)
1392 goto out_destroy_policy;
1394 ret = freq_qos_add_request(&policy->constraints,
1395 policy->min_freq_req, FREQ_QOS_MIN,
1396 FREQ_QOS_MIN_DEFAULT_VALUE);
1399 * So we don't call freq_qos_remove_request() for an
1400 * uninitialized request.
1402 kfree(policy->min_freq_req);
1403 policy->min_freq_req = NULL;
1404 goto out_destroy_policy;
1408 * This must be initialized right here to avoid calling
1409 * freq_qos_remove_request() on uninitialized request in case
1412 policy->max_freq_req = policy->min_freq_req + 1;
1414 ret = freq_qos_add_request(&policy->constraints,
1415 policy->max_freq_req, FREQ_QOS_MAX,
1416 FREQ_QOS_MAX_DEFAULT_VALUE);
1418 policy->max_freq_req = NULL;
1419 goto out_destroy_policy;
1422 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1423 CPUFREQ_CREATE_POLICY, policy);
1426 if (cpufreq_driver->get && has_target()) {
1427 policy->cur = cpufreq_driver->get(policy->cpu);
1429 pr_err("%s: ->get() failed\n", __func__);
1430 goto out_destroy_policy;
1435 * Sometimes boot loaders set CPU frequency to a value outside of
1436 * frequency table present with cpufreq core. In such cases CPU might be
1437 * unstable if it has to run on that frequency for long duration of time
1438 * and so its better to set it to a frequency which is specified in
1439 * freq-table. This also makes cpufreq stats inconsistent as
1440 * cpufreq-stats would fail to register because current frequency of CPU
1441 * isn't found in freq-table.
1443 * Because we don't want this change to effect boot process badly, we go
1444 * for the next freq which is >= policy->cur ('cur' must be set by now,
1445 * otherwise we will end up setting freq to lowest of the table as 'cur'
1446 * is initialized to zero).
1448 * We are passing target-freq as "policy->cur - 1" otherwise
1449 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1450 * equal to target-freq.
1452 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1454 /* Are we running at unknown frequency ? */
1455 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1456 if (ret == -EINVAL) {
1457 /* Warn user and fix it */
1458 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1459 __func__, policy->cpu, policy->cur);
1460 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1461 CPUFREQ_RELATION_L);
1464 * Reaching here after boot in a few seconds may not
1465 * mean that system will remain stable at "unknown"
1466 * frequency for longer duration. Hence, a BUG_ON().
1469 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1470 __func__, policy->cpu, policy->cur);
1475 ret = cpufreq_add_dev_interface(policy);
1477 goto out_destroy_policy;
1479 cpufreq_stats_create_table(policy);
1481 write_lock_irqsave(&cpufreq_driver_lock, flags);
1482 list_add(&policy->policy_list, &cpufreq_policy_list);
1483 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1486 ret = cpufreq_init_policy(policy);
1488 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1489 __func__, cpu, ret);
1490 goto out_destroy_policy;
1493 up_write(&policy->rwsem);
1495 kobject_uevent(&policy->kobj, KOBJ_ADD);
1497 /* Callback for handling stuff after policy is ready */
1498 if (cpufreq_driver->ready)
1499 cpufreq_driver->ready(policy);
1501 if (cpufreq_thermal_control_enabled(cpufreq_driver))
1502 policy->cdev = of_cpufreq_cooling_register(policy);
1504 pr_debug("initialization complete\n");
1509 for_each_cpu(j, policy->real_cpus)
1510 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1512 up_write(&policy->rwsem);
1515 if (cpufreq_driver->offline)
1516 cpufreq_driver->offline(policy);
1519 if (cpufreq_driver->exit)
1520 cpufreq_driver->exit(policy);
1523 cpufreq_policy_free(policy);
1528 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1530 * @sif: Subsystem interface structure pointer (not used)
1532 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1534 struct cpufreq_policy *policy;
1535 unsigned cpu = dev->id;
1538 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1540 if (cpu_online(cpu)) {
1541 ret = cpufreq_online(cpu);
1546 /* Create sysfs link on CPU registration */
1547 policy = per_cpu(cpufreq_cpu_data, cpu);
1549 add_cpu_dev_symlink(policy, cpu, dev);
1554 static int cpufreq_offline(unsigned int cpu)
1556 struct cpufreq_policy *policy;
1559 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1561 policy = cpufreq_cpu_get_raw(cpu);
1563 pr_debug("%s: No cpu_data found\n", __func__);
1567 down_write(&policy->rwsem);
1569 cpufreq_stop_governor(policy);
1571 cpumask_clear_cpu(cpu, policy->cpus);
1573 if (policy_is_inactive(policy)) {
1575 strncpy(policy->last_governor, policy->governor->name,
1578 policy->last_policy = policy->policy;
1579 } else if (cpu == policy->cpu) {
1580 /* Nominate new CPU */
1581 policy->cpu = cpumask_any(policy->cpus);
1584 /* Start governor again for active policy */
1585 if (!policy_is_inactive(policy)) {
1587 ret = cpufreq_start_governor(policy);
1589 pr_err("%s: Failed to start governor\n", __func__);
1595 if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1596 cpufreq_cooling_unregister(policy->cdev);
1597 policy->cdev = NULL;
1600 if (cpufreq_driver->stop_cpu)
1601 cpufreq_driver->stop_cpu(policy);
1604 cpufreq_exit_governor(policy);
1607 * Perform the ->offline() during light-weight tear-down, as
1608 * that allows fast recovery when the CPU comes back.
1610 if (cpufreq_driver->offline) {
1611 cpufreq_driver->offline(policy);
1612 } else if (cpufreq_driver->exit) {
1613 cpufreq_driver->exit(policy);
1614 policy->freq_table = NULL;
1618 up_write(&policy->rwsem);
1623 * cpufreq_remove_dev - remove a CPU device
1625 * Removes the cpufreq interface for a CPU device.
1627 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1629 unsigned int cpu = dev->id;
1630 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1635 if (cpu_online(cpu))
1636 cpufreq_offline(cpu);
1638 cpumask_clear_cpu(cpu, policy->real_cpus);
1639 remove_cpu_dev_symlink(policy, dev);
1641 if (cpumask_empty(policy->real_cpus)) {
1642 /* We did light-weight exit earlier, do full tear down now */
1643 if (cpufreq_driver->offline)
1644 cpufreq_driver->exit(policy);
1646 cpufreq_policy_free(policy);
1651 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1653 * @policy: policy managing CPUs
1654 * @new_freq: CPU frequency the CPU actually runs at
1656 * We adjust to current frequency first, and need to clean up later.
1657 * So either call to cpufreq_update_policy() or schedule handle_update()).
1659 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1660 unsigned int new_freq)
1662 struct cpufreq_freqs freqs;
1664 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1665 policy->cur, new_freq);
1667 freqs.old = policy->cur;
1668 freqs.new = new_freq;
1670 cpufreq_freq_transition_begin(policy, &freqs);
1671 cpufreq_freq_transition_end(policy, &freqs, 0);
1674 static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1676 unsigned int new_freq;
1678 new_freq = cpufreq_driver->get(policy->cpu);
1683 * If fast frequency switching is used with the given policy, the check
1684 * against policy->cur is pointless, so skip it in that case.
1686 if (policy->fast_switch_enabled || !has_target())
1689 if (policy->cur != new_freq) {
1690 cpufreq_out_of_sync(policy, new_freq);
1692 schedule_work(&policy->update);
1699 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1702 * This is the last known freq, without actually getting it from the driver.
1703 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1705 unsigned int cpufreq_quick_get(unsigned int cpu)
1707 struct cpufreq_policy *policy;
1708 unsigned int ret_freq = 0;
1709 unsigned long flags;
1711 read_lock_irqsave(&cpufreq_driver_lock, flags);
1713 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1714 ret_freq = cpufreq_driver->get(cpu);
1715 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1719 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1721 policy = cpufreq_cpu_get(cpu);
1723 ret_freq = policy->cur;
1724 cpufreq_cpu_put(policy);
1729 EXPORT_SYMBOL(cpufreq_quick_get);
1732 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1735 * Just return the max possible frequency for a given CPU.
1737 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1739 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1740 unsigned int ret_freq = 0;
1743 ret_freq = policy->max;
1744 cpufreq_cpu_put(policy);
1749 EXPORT_SYMBOL(cpufreq_quick_get_max);
1751 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1753 if (unlikely(policy_is_inactive(policy)))
1756 return cpufreq_verify_current_freq(policy, true);
1760 * cpufreq_get - get the current CPU frequency (in kHz)
1763 * Get the CPU current (static) CPU frequency
1765 unsigned int cpufreq_get(unsigned int cpu)
1767 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1768 unsigned int ret_freq = 0;
1771 down_read(&policy->rwsem);
1772 if (cpufreq_driver->get)
1773 ret_freq = __cpufreq_get(policy);
1774 up_read(&policy->rwsem);
1776 cpufreq_cpu_put(policy);
1781 EXPORT_SYMBOL(cpufreq_get);
1783 static struct subsys_interface cpufreq_interface = {
1785 .subsys = &cpu_subsys,
1786 .add_dev = cpufreq_add_dev,
1787 .remove_dev = cpufreq_remove_dev,
1791 * In case platform wants some specific frequency to be configured
1794 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1798 if (!policy->suspend_freq) {
1799 pr_debug("%s: suspend_freq not defined\n", __func__);
1803 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1804 policy->suspend_freq);
1806 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1807 CPUFREQ_RELATION_H);
1809 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1810 __func__, policy->suspend_freq, ret);
1814 EXPORT_SYMBOL(cpufreq_generic_suspend);
1817 * cpufreq_suspend() - Suspend CPUFreq governors
1819 * Called during system wide Suspend/Hibernate cycles for suspending governors
1820 * as some platforms can't change frequency after this point in suspend cycle.
1821 * Because some of the devices (like: i2c, regulators, etc) they use for
1822 * changing frequency are suspended quickly after this point.
1824 void cpufreq_suspend(void)
1826 struct cpufreq_policy *policy;
1828 if (!cpufreq_driver)
1831 if (!has_target() && !cpufreq_driver->suspend)
1834 pr_debug("%s: Suspending Governors\n", __func__);
1836 for_each_active_policy(policy) {
1838 down_write(&policy->rwsem);
1839 cpufreq_stop_governor(policy);
1840 up_write(&policy->rwsem);
1843 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1844 pr_err("%s: Failed to suspend driver: %s\n", __func__,
1845 cpufreq_driver->name);
1849 cpufreq_suspended = true;
1853 * cpufreq_resume() - Resume CPUFreq governors
1855 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1856 * are suspended with cpufreq_suspend().
1858 void cpufreq_resume(void)
1860 struct cpufreq_policy *policy;
1863 if (!cpufreq_driver)
1866 if (unlikely(!cpufreq_suspended))
1869 cpufreq_suspended = false;
1871 if (!has_target() && !cpufreq_driver->resume)
1874 pr_debug("%s: Resuming Governors\n", __func__);
1876 for_each_active_policy(policy) {
1877 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1878 pr_err("%s: Failed to resume driver: %p\n", __func__,
1880 } else if (has_target()) {
1881 down_write(&policy->rwsem);
1882 ret = cpufreq_start_governor(policy);
1883 up_write(&policy->rwsem);
1886 pr_err("%s: Failed to start governor for policy: %p\n",
1893 * cpufreq_get_current_driver - return current driver's name
1895 * Return the name string of the currently loaded cpufreq driver
1898 const char *cpufreq_get_current_driver(void)
1901 return cpufreq_driver->name;
1905 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1908 * cpufreq_get_driver_data - return current driver data
1910 * Return the private data of the currently loaded cpufreq
1911 * driver, or NULL if no cpufreq driver is loaded.
1913 void *cpufreq_get_driver_data(void)
1916 return cpufreq_driver->driver_data;
1920 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1922 /*********************************************************************
1923 * NOTIFIER LISTS INTERFACE *
1924 *********************************************************************/
1927 * cpufreq_register_notifier - register a driver with cpufreq
1928 * @nb: notifier function to register
1929 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1931 * Add a driver to one of two lists: either a list of drivers that
1932 * are notified about clock rate changes (once before and once after
1933 * the transition), or a list of drivers that are notified about
1934 * changes in cpufreq policy.
1936 * This function may sleep, and has the same return conditions as
1937 * blocking_notifier_chain_register.
1939 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1943 if (cpufreq_disabled())
1947 case CPUFREQ_TRANSITION_NOTIFIER:
1948 mutex_lock(&cpufreq_fast_switch_lock);
1950 if (cpufreq_fast_switch_count > 0) {
1951 mutex_unlock(&cpufreq_fast_switch_lock);
1954 ret = srcu_notifier_chain_register(
1955 &cpufreq_transition_notifier_list, nb);
1957 cpufreq_fast_switch_count--;
1959 mutex_unlock(&cpufreq_fast_switch_lock);
1961 case CPUFREQ_POLICY_NOTIFIER:
1962 ret = blocking_notifier_chain_register(
1963 &cpufreq_policy_notifier_list, nb);
1971 EXPORT_SYMBOL(cpufreq_register_notifier);
1974 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1975 * @nb: notifier block to be unregistered
1976 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1978 * Remove a driver from the CPU frequency notifier list.
1980 * This function may sleep, and has the same return conditions as
1981 * blocking_notifier_chain_unregister.
1983 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1987 if (cpufreq_disabled())
1991 case CPUFREQ_TRANSITION_NOTIFIER:
1992 mutex_lock(&cpufreq_fast_switch_lock);
1994 ret = srcu_notifier_chain_unregister(
1995 &cpufreq_transition_notifier_list, nb);
1996 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
1997 cpufreq_fast_switch_count++;
1999 mutex_unlock(&cpufreq_fast_switch_lock);
2001 case CPUFREQ_POLICY_NOTIFIER:
2002 ret = blocking_notifier_chain_unregister(
2003 &cpufreq_policy_notifier_list, nb);
2011 EXPORT_SYMBOL(cpufreq_unregister_notifier);
2014 /*********************************************************************
2016 *********************************************************************/
2019 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2020 * @policy: cpufreq policy to switch the frequency for.
2021 * @target_freq: New frequency to set (may be approximate).
2023 * Carry out a fast frequency switch without sleeping.
2025 * The driver's ->fast_switch() callback invoked by this function must be
2026 * suitable for being called from within RCU-sched read-side critical sections
2027 * and it is expected to select the minimum available frequency greater than or
2028 * equal to @target_freq (CPUFREQ_RELATION_L).
2030 * This function must not be called if policy->fast_switch_enabled is unset.
2032 * Governors calling this function must guarantee that it will never be invoked
2033 * twice in parallel for the same policy and that it will never be called in
2034 * parallel with either ->target() or ->target_index() for the same policy.
2036 * Returns the actual frequency set for the CPU.
2038 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2039 * error condition, the hardware configuration must be preserved.
2041 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2042 unsigned int target_freq)
2044 target_freq = clamp_val(target_freq, policy->min, policy->max);
2046 return cpufreq_driver->fast_switch(policy, target_freq);
2048 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
2050 /* Must set freqs->new to intermediate frequency */
2051 static int __target_intermediate(struct cpufreq_policy *policy,
2052 struct cpufreq_freqs *freqs, int index)
2056 freqs->new = cpufreq_driver->get_intermediate(policy, index);
2058 /* We don't need to switch to intermediate freq */
2062 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2063 __func__, policy->cpu, freqs->old, freqs->new);
2065 cpufreq_freq_transition_begin(policy, freqs);
2066 ret = cpufreq_driver->target_intermediate(policy, index);
2067 cpufreq_freq_transition_end(policy, freqs, ret);
2070 pr_err("%s: Failed to change to intermediate frequency: %d\n",
2076 static int __target_index(struct cpufreq_policy *policy, int index)
2078 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2079 unsigned int intermediate_freq = 0;
2080 unsigned int newfreq = policy->freq_table[index].frequency;
2081 int retval = -EINVAL;
2084 if (newfreq == policy->cur)
2087 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2089 /* Handle switching to intermediate frequency */
2090 if (cpufreq_driver->get_intermediate) {
2091 retval = __target_intermediate(policy, &freqs, index);
2095 intermediate_freq = freqs.new;
2096 /* Set old freq to intermediate */
2097 if (intermediate_freq)
2098 freqs.old = freqs.new;
2101 freqs.new = newfreq;
2102 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2103 __func__, policy->cpu, freqs.old, freqs.new);
2105 cpufreq_freq_transition_begin(policy, &freqs);
2108 retval = cpufreq_driver->target_index(policy, index);
2110 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2114 cpufreq_freq_transition_end(policy, &freqs, retval);
2117 * Failed after setting to intermediate freq? Driver should have
2118 * reverted back to initial frequency and so should we. Check
2119 * here for intermediate_freq instead of get_intermediate, in
2120 * case we haven't switched to intermediate freq at all.
2122 if (unlikely(retval && intermediate_freq)) {
2123 freqs.old = intermediate_freq;
2124 freqs.new = policy->restore_freq;
2125 cpufreq_freq_transition_begin(policy, &freqs);
2126 cpufreq_freq_transition_end(policy, &freqs, 0);
2133 int __cpufreq_driver_target(struct cpufreq_policy *policy,
2134 unsigned int target_freq,
2135 unsigned int relation)
2137 unsigned int old_target_freq = target_freq;
2140 if (cpufreq_disabled())
2143 /* Make sure that target_freq is within supported range */
2144 target_freq = clamp_val(target_freq, policy->min, policy->max);
2146 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2147 policy->cpu, target_freq, relation, old_target_freq);
2150 * This might look like a redundant call as we are checking it again
2151 * after finding index. But it is left intentionally for cases where
2152 * exactly same freq is called again and so we can save on few function
2155 if (target_freq == policy->cur)
2158 /* Save last value to restore later on errors */
2159 policy->restore_freq = policy->cur;
2161 if (cpufreq_driver->target)
2162 return cpufreq_driver->target(policy, target_freq, relation);
2164 if (!cpufreq_driver->target_index)
2167 index = cpufreq_frequency_table_target(policy, target_freq, relation);
2169 return __target_index(policy, index);
2171 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2173 int cpufreq_driver_target(struct cpufreq_policy *policy,
2174 unsigned int target_freq,
2175 unsigned int relation)
2179 down_write(&policy->rwsem);
2181 ret = __cpufreq_driver_target(policy, target_freq, relation);
2183 up_write(&policy->rwsem);
2187 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2189 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2194 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2198 /* Don't start any governor operations if we are entering suspend */
2199 if (cpufreq_suspended)
2202 * Governor might not be initiated here if ACPI _PPC changed
2203 * notification happened, so check it.
2205 if (!policy->governor)
2208 /* Platform doesn't want dynamic frequency switching ? */
2209 if (policy->governor->dynamic_switching &&
2210 cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2211 struct cpufreq_governor *gov = cpufreq_fallback_governor();
2214 pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2215 policy->governor->name, gov->name);
2216 policy->governor = gov;
2222 if (!try_module_get(policy->governor->owner))
2225 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2227 if (policy->governor->init) {
2228 ret = policy->governor->init(policy);
2230 module_put(policy->governor->owner);
2238 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2240 if (cpufreq_suspended || !policy->governor)
2243 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2245 if (policy->governor->exit)
2246 policy->governor->exit(policy);
2248 module_put(policy->governor->owner);
2251 static int cpufreq_start_governor(struct cpufreq_policy *policy)
2255 if (cpufreq_suspended)
2258 if (!policy->governor)
2261 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2263 if (cpufreq_driver->get)
2264 cpufreq_verify_current_freq(policy, false);
2266 if (policy->governor->start) {
2267 ret = policy->governor->start(policy);
2272 if (policy->governor->limits)
2273 policy->governor->limits(policy);
2278 static void cpufreq_stop_governor(struct cpufreq_policy *policy)
2280 if (cpufreq_suspended || !policy->governor)
2283 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2285 if (policy->governor->stop)
2286 policy->governor->stop(policy);
2289 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2291 if (cpufreq_suspended || !policy->governor)
2294 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2296 if (policy->governor->limits)
2297 policy->governor->limits(policy);
2300 int cpufreq_register_governor(struct cpufreq_governor *governor)
2307 if (cpufreq_disabled())
2310 mutex_lock(&cpufreq_governor_mutex);
2313 if (!find_governor(governor->name)) {
2315 list_add(&governor->governor_list, &cpufreq_governor_list);
2318 mutex_unlock(&cpufreq_governor_mutex);
2321 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2323 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2325 struct cpufreq_policy *policy;
2326 unsigned long flags;
2331 if (cpufreq_disabled())
2334 /* clear last_governor for all inactive policies */
2335 read_lock_irqsave(&cpufreq_driver_lock, flags);
2336 for_each_inactive_policy(policy) {
2337 if (!strcmp(policy->last_governor, governor->name)) {
2338 policy->governor = NULL;
2339 strcpy(policy->last_governor, "\0");
2342 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2344 mutex_lock(&cpufreq_governor_mutex);
2345 list_del(&governor->governor_list);
2346 mutex_unlock(&cpufreq_governor_mutex);
2348 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2351 /*********************************************************************
2352 * POLICY INTERFACE *
2353 *********************************************************************/
2356 * cpufreq_get_policy - get the current cpufreq_policy
2357 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2360 * Reads the current cpufreq policy.
2362 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2364 struct cpufreq_policy *cpu_policy;
2368 cpu_policy = cpufreq_cpu_get(cpu);
2372 memcpy(policy, cpu_policy, sizeof(*policy));
2374 cpufreq_cpu_put(cpu_policy);
2377 EXPORT_SYMBOL(cpufreq_get_policy);
2380 * cpufreq_set_policy - Modify cpufreq policy parameters.
2381 * @policy: Policy object to modify.
2382 * @new_gov: Policy governor pointer.
2383 * @new_pol: Policy value (for drivers with built-in governors).
2385 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2386 * limits to be set for the policy, update @policy with the verified limits
2387 * values and either invoke the driver's ->setpolicy() callback (if present) or
2388 * carry out a governor update for @policy. That is, run the current governor's
2389 * ->limits() callback (if @new_gov points to the same object as the one in
2390 * @policy) or replace the governor for @policy with @new_gov.
2392 * The cpuinfo part of @policy is not updated by this function.
2394 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2395 struct cpufreq_governor *new_gov,
2396 unsigned int new_pol)
2398 struct cpufreq_policy_data new_data;
2399 struct cpufreq_governor *old_gov;
2402 memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2403 new_data.freq_table = policy->freq_table;
2404 new_data.cpu = policy->cpu;
2406 * PM QoS framework collects all the requests from users and provide us
2407 * the final aggregated value here.
2409 new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2410 new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2412 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2413 new_data.cpu, new_data.min, new_data.max);
2415 /* verify the cpu speed can be set within this limit */
2416 ret = cpufreq_driver->verify(&new_data);
2420 policy->min = new_data.min;
2421 policy->max = new_data.max;
2422 trace_cpu_frequency_limits(policy);
2424 policy->cached_target_freq = UINT_MAX;
2426 pr_debug("new min and max freqs are %u - %u kHz\n",
2427 policy->min, policy->max);
2429 if (cpufreq_driver->setpolicy) {
2430 policy->policy = new_pol;
2431 pr_debug("setting range\n");
2432 return cpufreq_driver->setpolicy(policy);
2435 if (new_gov == policy->governor) {
2436 pr_debug("governor limits update\n");
2437 cpufreq_governor_limits(policy);
2441 pr_debug("governor switch\n");
2443 /* save old, working values */
2444 old_gov = policy->governor;
2445 /* end old governor */
2447 cpufreq_stop_governor(policy);
2448 cpufreq_exit_governor(policy);
2451 /* start new governor */
2452 policy->governor = new_gov;
2453 ret = cpufreq_init_governor(policy);
2455 ret = cpufreq_start_governor(policy);
2457 pr_debug("governor change\n");
2458 sched_cpufreq_governor_change(policy, old_gov);
2461 cpufreq_exit_governor(policy);
2464 /* new governor failed, so re-start old one */
2465 pr_debug("starting governor %s failed\n", policy->governor->name);
2467 policy->governor = old_gov;
2468 if (cpufreq_init_governor(policy))
2469 policy->governor = NULL;
2471 cpufreq_start_governor(policy);
2478 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2479 * @cpu: CPU to re-evaluate the policy for.
2481 * Update the current frequency for the cpufreq policy of @cpu and use
2482 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2483 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2484 * for the policy in question, among other things.
2486 void cpufreq_update_policy(unsigned int cpu)
2488 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2494 * BIOS might change freq behind our back
2495 * -> ask driver for current freq and notify governors about a change
2497 if (cpufreq_driver->get && has_target() &&
2498 (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2501 refresh_frequency_limits(policy);
2504 cpufreq_cpu_release(policy);
2506 EXPORT_SYMBOL(cpufreq_update_policy);
2509 * cpufreq_update_limits - Update policy limits for a given CPU.
2510 * @cpu: CPU to update the policy limits for.
2512 * Invoke the driver's ->update_limits callback if present or call
2513 * cpufreq_update_policy() for @cpu.
2515 void cpufreq_update_limits(unsigned int cpu)
2517 if (cpufreq_driver->update_limits)
2518 cpufreq_driver->update_limits(cpu);
2520 cpufreq_update_policy(cpu);
2522 EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2524 /*********************************************************************
2526 *********************************************************************/
2527 static int cpufreq_boost_set_sw(int state)
2529 struct cpufreq_policy *policy;
2531 for_each_active_policy(policy) {
2534 if (!policy->freq_table)
2537 ret = cpufreq_frequency_table_cpuinfo(policy,
2538 policy->freq_table);
2540 pr_err("%s: Policy frequency update failed\n",
2545 ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2553 int cpufreq_boost_trigger_state(int state)
2555 unsigned long flags;
2558 if (cpufreq_driver->boost_enabled == state)
2561 write_lock_irqsave(&cpufreq_driver_lock, flags);
2562 cpufreq_driver->boost_enabled = state;
2563 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2565 ret = cpufreq_driver->set_boost(state);
2567 write_lock_irqsave(&cpufreq_driver_lock, flags);
2568 cpufreq_driver->boost_enabled = !state;
2569 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2571 pr_err("%s: Cannot %s BOOST\n",
2572 __func__, state ? "enable" : "disable");
2578 static bool cpufreq_boost_supported(void)
2580 return cpufreq_driver->set_boost;
2583 static int create_boost_sysfs_file(void)
2587 ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2589 pr_err("%s: cannot register global BOOST sysfs file\n",
2595 static void remove_boost_sysfs_file(void)
2597 if (cpufreq_boost_supported())
2598 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2601 int cpufreq_enable_boost_support(void)
2603 if (!cpufreq_driver)
2606 if (cpufreq_boost_supported())
2609 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2611 /* This will get removed on driver unregister */
2612 return create_boost_sysfs_file();
2614 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
2616 int cpufreq_boost_enabled(void)
2618 return cpufreq_driver->boost_enabled;
2620 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2622 /*********************************************************************
2623 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2624 *********************************************************************/
2625 static enum cpuhp_state hp_online;
2627 static int cpuhp_cpufreq_online(unsigned int cpu)
2629 cpufreq_online(cpu);
2634 static int cpuhp_cpufreq_offline(unsigned int cpu)
2636 cpufreq_offline(cpu);
2642 * cpufreq_register_driver - register a CPU Frequency driver
2643 * @driver_data: A struct cpufreq_driver containing the values#
2644 * submitted by the CPU Frequency driver.
2646 * Registers a CPU Frequency driver to this core code. This code
2647 * returns zero on success, -EEXIST when another driver got here first
2648 * (and isn't unregistered in the meantime).
2651 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2653 unsigned long flags;
2656 if (cpufreq_disabled())
2660 * The cpufreq core depends heavily on the availability of device
2661 * structure, make sure they are available before proceeding further.
2663 if (!get_cpu_device(0))
2664 return -EPROBE_DEFER;
2666 if (!driver_data || !driver_data->verify || !driver_data->init ||
2667 !(driver_data->setpolicy || driver_data->target_index ||
2668 driver_data->target) ||
2669 (driver_data->setpolicy && (driver_data->target_index ||
2670 driver_data->target)) ||
2671 (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2672 (!driver_data->online != !driver_data->offline))
2675 pr_debug("trying to register driver %s\n", driver_data->name);
2677 /* Protect against concurrent CPU online/offline. */
2680 write_lock_irqsave(&cpufreq_driver_lock, flags);
2681 if (cpufreq_driver) {
2682 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2686 cpufreq_driver = driver_data;
2687 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2689 if (driver_data->setpolicy)
2690 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2692 if (cpufreq_boost_supported()) {
2693 ret = create_boost_sysfs_file();
2695 goto err_null_driver;
2698 ret = subsys_interface_register(&cpufreq_interface);
2700 goto err_boost_unreg;
2702 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2703 list_empty(&cpufreq_policy_list)) {
2704 /* if all ->init() calls failed, unregister */
2706 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2711 ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2713 cpuhp_cpufreq_online,
2714 cpuhp_cpufreq_offline);
2720 pr_debug("driver %s up and running\n", driver_data->name);
2724 subsys_interface_unregister(&cpufreq_interface);
2726 remove_boost_sysfs_file();
2728 write_lock_irqsave(&cpufreq_driver_lock, flags);
2729 cpufreq_driver = NULL;
2730 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2735 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2738 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2740 * Unregister the current CPUFreq driver. Only call this if you have
2741 * the right to do so, i.e. if you have succeeded in initialising before!
2742 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2743 * currently not initialised.
2745 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2747 unsigned long flags;
2749 if (!cpufreq_driver || (driver != cpufreq_driver))
2752 pr_debug("unregistering driver %s\n", driver->name);
2754 /* Protect against concurrent cpu hotplug */
2756 subsys_interface_unregister(&cpufreq_interface);
2757 remove_boost_sysfs_file();
2758 cpuhp_remove_state_nocalls_cpuslocked(hp_online);
2760 write_lock_irqsave(&cpufreq_driver_lock, flags);
2762 cpufreq_driver = NULL;
2764 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2769 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2771 struct kobject *cpufreq_global_kobject;
2772 EXPORT_SYMBOL(cpufreq_global_kobject);
2774 static int __init cpufreq_core_init(void)
2776 if (cpufreq_disabled())
2779 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2780 BUG_ON(!cpufreq_global_kobject);
2784 module_param(off, int, 0444);
2785 core_initcall(cpufreq_core_init);