2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
34 static LIST_HEAD(cpufreq_policy_list);
36 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
38 return cpumask_empty(policy->cpus);
41 static bool suitable_policy(struct cpufreq_policy *policy, bool active)
43 return active == !policy_is_inactive(policy);
46 /* Finds Next Active/Inactive policy */
47 static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
51 policy = list_next_entry(policy, policy_list);
53 /* No more policies in the list */
54 if (&policy->policy_list == &cpufreq_policy_list)
56 } while (!suitable_policy(policy, active));
61 static struct cpufreq_policy *first_policy(bool active)
63 struct cpufreq_policy *policy;
65 /* No policies in the list */
66 if (list_empty(&cpufreq_policy_list))
69 policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
72 if (!suitable_policy(policy, active))
73 policy = next_policy(policy, active);
78 /* Macros to iterate over CPU policies */
79 #define for_each_suitable_policy(__policy, __active) \
80 for (__policy = first_policy(__active); \
82 __policy = next_policy(__policy, __active))
84 #define for_each_active_policy(__policy) \
85 for_each_suitable_policy(__policy, true)
86 #define for_each_inactive_policy(__policy) \
87 for_each_suitable_policy(__policy, false)
89 #define for_each_policy(__policy) \
90 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
92 /* Iterate over governors */
93 static LIST_HEAD(cpufreq_governor_list);
94 #define for_each_governor(__governor) \
95 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
98 * The "cpufreq driver" - the arch- or hardware-dependent low
99 * level driver of CPUFreq support, and its spinlock. This lock
100 * also protects the cpufreq_cpu_data array.
102 static struct cpufreq_driver *cpufreq_driver;
103 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
104 static DEFINE_RWLOCK(cpufreq_driver_lock);
105 DEFINE_MUTEX(cpufreq_governor_lock);
107 /* Flag to suspend/resume CPUFreq governors */
108 static bool cpufreq_suspended;
110 static inline bool has_target(void)
112 return cpufreq_driver->target_index || cpufreq_driver->target;
115 /* internal prototypes */
116 static int __cpufreq_governor(struct cpufreq_policy *policy,
118 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
119 static void handle_update(struct work_struct *work);
122 * Two notifier lists: the "policy" list is involved in the
123 * validation process for a new CPU frequency policy; the
124 * "transition" list for kernel code that needs to handle
125 * changes to devices when the CPU clock speed changes.
126 * The mutex locks both lists.
128 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
129 static struct srcu_notifier_head cpufreq_transition_notifier_list;
131 static bool init_cpufreq_transition_notifier_list_called;
132 static int __init init_cpufreq_transition_notifier_list(void)
134 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
135 init_cpufreq_transition_notifier_list_called = true;
138 pure_initcall(init_cpufreq_transition_notifier_list);
140 static int off __read_mostly;
141 static int cpufreq_disabled(void)
145 void disable_cpufreq(void)
149 static DEFINE_MUTEX(cpufreq_governor_mutex);
151 bool have_governor_per_policy(void)
153 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
155 EXPORT_SYMBOL_GPL(have_governor_per_policy);
157 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
159 if (have_governor_per_policy())
160 return &policy->kobj;
162 return cpufreq_global_kobject;
164 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
166 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
168 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
170 return policy && !policy_is_inactive(policy) ?
171 policy->freq_table : NULL;
173 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
175 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
181 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
183 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
184 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
185 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
186 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
187 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
188 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
190 idle_time = cur_wall_time - busy_time;
192 *wall = cputime_to_usecs(cur_wall_time);
194 return cputime_to_usecs(idle_time);
197 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
199 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
201 if (idle_time == -1ULL)
202 return get_cpu_idle_time_jiffy(cpu, wall);
204 idle_time += get_cpu_iowait_time_us(cpu, wall);
208 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
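/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * guarded by "#if 0" so it is never built): how a governor-style sampler
 * might use get_cpu_idle_time() to derive a busy percentage from two
 * successive samples. All names below are hypothetical.
 */
#if 0
static unsigned int example_cpu_busy_pct(unsigned int cpu, u64 *prev_idle,
                                          u64 *prev_wall)
{
        u64 cur_idle, cur_wall;
        unsigned int idle_delta, wall_delta;

        /* io_busy == 0: iowait time is accounted as idle time */
        cur_idle = get_cpu_idle_time(cpu, &cur_wall, 0);

        idle_delta = (unsigned int)(cur_idle - *prev_idle);
        wall_delta = (unsigned int)(cur_wall - *prev_wall);
        *prev_idle = cur_idle;
        *prev_wall = cur_wall;

        if (!wall_delta || wall_delta < idle_delta)
                return 0;

        return 100 * (wall_delta - idle_delta) / wall_delta;
}
#endif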
211 * This is a generic cpufreq init() routine which can be used by cpufreq
212 * drivers of SMP systems. It will do the following:
213 * - validate & show the frequency table passed
214 * - set the policy's transition latency
215 * - fill policy->cpus with all possible CPUs
217 int cpufreq_generic_init(struct cpufreq_policy *policy,
218 struct cpufreq_frequency_table *table,
219 unsigned int transition_latency)
223 ret = cpufreq_table_validate_and_show(policy, table);
225 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
229 policy->cpuinfo.transition_latency = transition_latency;
232 * The driver only supports the SMP configuration where all processors
233 * share the clock and voltage.
235 cpumask_setall(policy->cpus);
239 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
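/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * guarded by "#if 0"): a minimal ->init() callback of a hypothetical driver
 * built on cpufreq_generic_init(). The table entries and the 300 us
 * transition latency are made-up values.
 */
#if 0
static struct cpufreq_frequency_table example_freq_table[] = {
        { .frequency = 400000 },                /* kHz */
        { .frequency = 800000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}
#endif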
241 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
243 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
245 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
247 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
249 unsigned int cpufreq_generic_get(unsigned int cpu)
251 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
253 if (!policy || IS_ERR(policy->clk)) {
254 pr_err("%s: No %s associated to cpu: %d\n",
255 __func__, policy ? "clk" : "policy", cpu);
259 return clk_get_rate(policy->clk) / 1000;
261 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
264 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
266 * @cpu: cpu to find policy for.
268 * This returns the policy for 'cpu', or NULL if it doesn't exist.
269 * It also increments the kobject reference count to mark the policy busy, so
270 * a corresponding call to cpufreq_cpu_put() is required to decrement it again.
271 * If that call isn't made, the policy won't be freed, since freeing depends
272 * on the kobject reference count.
274 * Return: A valid policy on success, otherwise NULL on failure.
276 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
278 struct cpufreq_policy *policy = NULL;
281 if (WARN_ON(cpu >= nr_cpu_ids))
284 /* get the cpufreq driver */
285 read_lock_irqsave(&cpufreq_driver_lock, flags);
287 if (cpufreq_driver) {
289 policy = cpufreq_cpu_get_raw(cpu);
291 kobject_get(&policy->kobj);
294 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
298 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
301 * cpufreq_cpu_put: Decrements the usage count of a policy
303 * @policy: policy earlier returned by cpufreq_cpu_get().
305 * This decrements the kobject reference count incremented earlier by calling cpufreq_cpu_get().
308 void cpufreq_cpu_put(struct cpufreq_policy *policy)
310 kobject_put(&policy->kobj);
312 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
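/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * guarded by "#if 0"): the get/put pairing described above. Every successful
 * cpufreq_cpu_get() must be balanced by a cpufreq_cpu_put().
 */
#if 0
static unsigned int example_last_known_freq(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int freq = 0;

        if (policy) {
                freq = policy->cur;
                cpufreq_cpu_put(policy);        /* drop the kobject reference */
        }

        return freq;
}
#endif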
314 /*********************************************************************
315 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
316 *********************************************************************/
319 * adjust_jiffies - adjust the system "loops_per_jiffy"
321 * This function alters the system "loops_per_jiffy" for the clock
322 * speed change. Note that loops_per_jiffy cannot be updated on SMP
323 * systems as each CPU might be scaled differently. So, use the arch
324 * per-CPU loops_per_jiffy value wherever possible.
326 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
329 static unsigned long l_p_j_ref;
330 static unsigned int l_p_j_ref_freq;
332 if (ci->flags & CPUFREQ_CONST_LOOPS)
335 if (!l_p_j_ref_freq) {
336 l_p_j_ref = loops_per_jiffy;
337 l_p_j_ref_freq = ci->old;
338 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
339 l_p_j_ref, l_p_j_ref_freq);
341 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
342 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
344 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
345 loops_per_jiffy, ci->new);
350 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
351 struct cpufreq_freqs *freqs, unsigned int state)
353 BUG_ON(irqs_disabled());
355 if (cpufreq_disabled())
358 freqs->flags = cpufreq_driver->flags;
359 pr_debug("notification %u of frequency transition to %u kHz\n",
364 case CPUFREQ_PRECHANGE:
365 /* detect if the driver reported a value as "old frequency"
366 * which is not equal to what the cpufreq core thinks is the current ("old") frequency.
369 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
370 if ((policy) && (policy->cpu == freqs->cpu) &&
371 (policy->cur) && (policy->cur != freqs->old)) {
372 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
373 freqs->old, policy->cur);
374 freqs->old = policy->cur;
377 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
378 CPUFREQ_PRECHANGE, freqs);
379 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
382 case CPUFREQ_POSTCHANGE:
383 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
384 pr_debug("FREQ: %lu - CPU: %lu\n",
385 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
386 trace_cpu_frequency(freqs->new, freqs->cpu);
387 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
388 CPUFREQ_POSTCHANGE, freqs);
389 if (likely(policy) && likely(policy->cpu == freqs->cpu))
390 policy->cur = freqs->new;
396 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
397 * on frequency transition.
399 * This function calls the transition notifiers and the "adjust_jiffies"
400 * function. It is called twice on all CPU frequency changes that have external effects.
403 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
404 struct cpufreq_freqs *freqs, unsigned int state)
406 for_each_cpu(freqs->cpu, policy->cpus)
407 __cpufreq_notify_transition(policy, freqs, state);
410 /* Do post notifications when there are chances that transition has failed */
411 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
412 struct cpufreq_freqs *freqs, int transition_failed)
414 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
415 if (!transition_failed)
418 swap(freqs->old, freqs->new);
419 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
420 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
423 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
424 struct cpufreq_freqs *freqs)
428 * Catch double invocations of _begin() which lead to self-deadlock.
429 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
430 * doesn't invoke _begin() on their behalf, and hence the chances of
431 * double invocations are very low. Moreover, there are scenarios
432 * where these checks can emit false-positive warnings in these
433 * drivers; so we avoid that by skipping them altogether.
435 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
436 && current == policy->transition_task);
439 wait_event(policy->transition_wait, !policy->transition_ongoing);
441 spin_lock(&policy->transition_lock);
443 if (unlikely(policy->transition_ongoing)) {
444 spin_unlock(&policy->transition_lock);
448 policy->transition_ongoing = true;
449 policy->transition_task = current;
451 spin_unlock(&policy->transition_lock);
453 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
455 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
457 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
458 struct cpufreq_freqs *freqs, int transition_failed)
460 if (unlikely(WARN_ON(!policy->transition_ongoing)))
463 cpufreq_notify_post_transition(policy, freqs, transition_failed);
465 policy->transition_ongoing = false;
466 policy->transition_task = NULL;
468 wake_up(&policy->transition_wait);
470 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
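/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * guarded by "#if 0"): a driver that performs its own frequency switch (e.g.
 * via a ->target() callback) brackets the switch with the begin/end helpers
 * above. example_write_freq() is a hypothetical hardware accessor.
 */
#if 0
static int example_write_freq(unsigned int cpu, unsigned int khz)
{
        /* hypothetical register/clock write; pretend it always succeeds */
        return 0;
}

static int example_switch_freq(struct cpufreq_policy *policy,
                               unsigned int new_freq)
{
        struct cpufreq_freqs freqs;
        int ret;

        freqs.old = policy->cur;
        freqs.new = new_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        ret = example_write_freq(policy->cpu, new_freq);
        cpufreq_freq_transition_end(policy, &freqs, ret);

        return ret;
}
#endif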
473 /*********************************************************************
475 *********************************************************************/
476 static ssize_t show_boost(struct kobject *kobj,
477 struct kobj_attribute *attr, char *buf)
479 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
482 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
483 const char *buf, size_t count)
487 ret = sscanf(buf, "%d", &enable);
488 if (ret != 1 || enable < 0 || enable > 1)
491 if (cpufreq_boost_trigger_state(enable)) {
492 pr_err("%s: Cannot %s BOOST!\n",
493 __func__, enable ? "enable" : "disable");
497 pr_debug("%s: cpufreq BOOST %s\n",
498 __func__, enable ? "enabled" : "disabled");
502 define_one_global_rw(boost);
504 static struct cpufreq_governor *find_governor(const char *str_governor)
506 struct cpufreq_governor *t;
509 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
516 * cpufreq_parse_governor - parse a governor string
518 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
519 struct cpufreq_governor **governor)
523 if (cpufreq_driver->setpolicy) {
524 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
525 *policy = CPUFREQ_POLICY_PERFORMANCE;
527 } else if (!strncasecmp(str_governor, "powersave",
529 *policy = CPUFREQ_POLICY_POWERSAVE;
533 struct cpufreq_governor *t;
535 mutex_lock(&cpufreq_governor_mutex);
537 t = find_governor(str_governor);
542 mutex_unlock(&cpufreq_governor_mutex);
543 ret = request_module("cpufreq_%s", str_governor);
544 mutex_lock(&cpufreq_governor_mutex);
547 t = find_governor(str_governor);
555 mutex_unlock(&cpufreq_governor_mutex);
561 * cpufreq_per_cpu_attr_read() / show_##file_name() -
562 * print out cpufreq information
564 * Write out information from cpufreq_driver->policy[cpu]; object must be an "unsigned int".
568 #define show_one(file_name, object) \
569 static ssize_t show_##file_name \
570 (struct cpufreq_policy *policy, char *buf) \
572 return sprintf(buf, "%u\n", policy->object); \
575 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
576 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
577 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
578 show_one(scaling_min_freq, min);
579 show_one(scaling_max_freq, max);
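/*
 * Editor's note: for reference, show_one(scaling_max_freq, max) above expands
 * to roughly:
 *
 *	static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 */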
581 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
585 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
586 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
588 ret = sprintf(buf, "%u\n", policy->cur);
592 static int cpufreq_set_policy(struct cpufreq_policy *policy,
593 struct cpufreq_policy *new_policy);
596 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
598 #define store_one(file_name, object) \
599 static ssize_t store_##file_name \
600 (struct cpufreq_policy *policy, const char *buf, size_t count) \
603 struct cpufreq_policy new_policy; \
605 memcpy(&new_policy, policy, sizeof(*policy)); \
606 new_policy.min = policy->user_policy.min; \
607 new_policy.max = policy->user_policy.max; \
609 ret = sscanf(buf, "%u", &new_policy.object); \
613 temp = new_policy.object; \
614 ret = cpufreq_set_policy(policy, &new_policy); \
616 policy->user_policy.object = temp; \
618 return ret ? ret : count; \
621 store_one(scaling_min_freq, min);
622 store_one(scaling_max_freq, max);
625 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
627 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
630 unsigned int cur_freq = __cpufreq_get(policy);
633 return sprintf(buf, "%u\n", cur_freq);
635 return sprintf(buf, "<unknown>\n");
639 * show_scaling_governor - show the current policy for the specified CPU
641 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
643 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
644 return sprintf(buf, "powersave\n");
645 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
646 return sprintf(buf, "performance\n");
647 else if (policy->governor)
648 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
649 policy->governor->name);
654 * store_scaling_governor - store policy for the specified CPU
656 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
657 const char *buf, size_t count)
660 char str_governor[16];
661 struct cpufreq_policy new_policy;
663 memcpy(&new_policy, policy, sizeof(*policy));
665 ret = sscanf(buf, "%15s", str_governor);
669 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
670 &new_policy.governor))
673 ret = cpufreq_set_policy(policy, &new_policy);
674 return ret ? ret : count;
678 * show_scaling_driver - show the cpufreq driver currently loaded
680 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
682 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
686 * show_scaling_available_governors - show the available CPUfreq governors
688 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
692 struct cpufreq_governor *t;
695 i += sprintf(buf, "performance powersave");
699 for_each_governor(t) {
700 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
701 - (CPUFREQ_NAME_LEN + 2)))
703 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
706 i += sprintf(&buf[i], "\n");
710 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
715 for_each_cpu(cpu, mask) {
717 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
718 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
719 if (i >= (PAGE_SIZE - 5))
722 i += sprintf(&buf[i], "\n");
725 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
728 * show_related_cpus - show the CPUs affected by each transition even if
729 * hw coordination is in use
731 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
733 return cpufreq_show_cpus(policy->related_cpus, buf);
737 * show_affected_cpus - show the CPUs affected by each transition
739 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
741 return cpufreq_show_cpus(policy->cpus, buf);
744 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
745 const char *buf, size_t count)
747 unsigned int freq = 0;
750 if (!policy->governor || !policy->governor->store_setspeed)
753 ret = sscanf(buf, "%u", &freq);
757 policy->governor->store_setspeed(policy, freq);
762 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
764 if (!policy->governor || !policy->governor->show_setspeed)
765 return sprintf(buf, "<unsupported>\n");
767 return policy->governor->show_setspeed(policy, buf);
771 * show_bios_limit - show the current cpufreq HW/BIOS limitation
773 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
777 if (cpufreq_driver->bios_limit) {
778 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
780 return sprintf(buf, "%u\n", limit);
782 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
785 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
786 cpufreq_freq_attr_ro(cpuinfo_min_freq);
787 cpufreq_freq_attr_ro(cpuinfo_max_freq);
788 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
789 cpufreq_freq_attr_ro(scaling_available_governors);
790 cpufreq_freq_attr_ro(scaling_driver);
791 cpufreq_freq_attr_ro(scaling_cur_freq);
792 cpufreq_freq_attr_ro(bios_limit);
793 cpufreq_freq_attr_ro(related_cpus);
794 cpufreq_freq_attr_ro(affected_cpus);
795 cpufreq_freq_attr_rw(scaling_min_freq);
796 cpufreq_freq_attr_rw(scaling_max_freq);
797 cpufreq_freq_attr_rw(scaling_governor);
798 cpufreq_freq_attr_rw(scaling_setspeed);
800 static struct attribute *default_attrs[] = {
801 &cpuinfo_min_freq.attr,
802 &cpuinfo_max_freq.attr,
803 &cpuinfo_transition_latency.attr,
804 &scaling_min_freq.attr,
805 &scaling_max_freq.attr,
808 &scaling_governor.attr,
809 &scaling_driver.attr,
810 &scaling_available_governors.attr,
811 &scaling_setspeed.attr,
815 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
816 #define to_attr(a) container_of(a, struct freq_attr, attr)
818 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
820 struct cpufreq_policy *policy = to_policy(kobj);
821 struct freq_attr *fattr = to_attr(attr);
827 down_read(&policy->rwsem);
830 ret = fattr->show(policy, buf);
834 up_read(&policy->rwsem);
839 static ssize_t store(struct kobject *kobj, struct attribute *attr,
840 const char *buf, size_t count)
842 struct cpufreq_policy *policy = to_policy(kobj);
843 struct freq_attr *fattr = to_attr(attr);
844 ssize_t ret = -EINVAL;
851 if (!cpu_online(policy->cpu))
854 down_write(&policy->rwsem);
857 ret = fattr->store(policy, buf, count);
861 up_write(&policy->rwsem);
868 static void cpufreq_sysfs_release(struct kobject *kobj)
870 struct cpufreq_policy *policy = to_policy(kobj);
871 pr_debug("last reference is dropped\n");
872 complete(&policy->kobj_unregister);
875 static const struct sysfs_ops sysfs_ops = {
880 static struct kobj_type ktype_cpufreq = {
881 .sysfs_ops = &sysfs_ops,
882 .default_attrs = default_attrs,
883 .release = cpufreq_sysfs_release,
886 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
888 struct device *cpu_dev;
890 pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
895 cpu_dev = get_cpu_device(cpu);
896 if (WARN_ON(!cpu_dev))
899 return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
902 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
904 struct device *cpu_dev;
906 pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
908 cpu_dev = get_cpu_device(cpu);
909 if (WARN_ON(!cpu_dev))
912 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
915 /* Add/remove symlinks for all related CPUs */
916 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
921 /* Some related CPUs might not be present (physically hotplugged) */
922 for_each_cpu(j, policy->real_cpus) {
923 ret = add_cpu_dev_symlink(policy, j);
931 static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
935 /* Some related CPUs might not be present (physically hotplugged) */
936 for_each_cpu(j, policy->real_cpus)
937 remove_cpu_dev_symlink(policy, j);
940 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
942 struct freq_attr **drv_attr;
945 /* set up files for this cpu device */
946 drv_attr = cpufreq_driver->attr;
947 while (drv_attr && *drv_attr) {
948 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
953 if (cpufreq_driver->get) {
954 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
959 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
963 if (cpufreq_driver->bios_limit) {
964 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
969 return cpufreq_add_dev_symlink(policy);
972 static int cpufreq_init_policy(struct cpufreq_policy *policy)
974 struct cpufreq_governor *gov = NULL;
975 struct cpufreq_policy new_policy;
977 memcpy(&new_policy, policy, sizeof(*policy));
979 /* Update governor of new_policy to the governor used before hotplug */
980 gov = find_governor(policy->last_governor);
982 pr_debug("Restoring governor %s for cpu %d\n",
983 policy->governor->name, policy->cpu);
985 gov = CPUFREQ_DEFAULT_GOVERNOR;
987 new_policy.governor = gov;
989 /* Use the default policy if there is no last_policy. */
990 if (cpufreq_driver->setpolicy) {
991 if (policy->last_policy)
992 new_policy.policy = policy->last_policy;
994 cpufreq_parse_governor(gov->name, &new_policy.policy,
997 /* set default policy */
998 return cpufreq_set_policy(policy, &new_policy);
1001 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1005 /* Has this CPU been taken care of already? */
1006 if (cpumask_test_cpu(cpu, policy->cpus))
1010 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1012 pr_err("%s: Failed to stop governor\n", __func__);
1017 down_write(&policy->rwsem);
1018 cpumask_set_cpu(cpu, policy->cpus);
1019 up_write(&policy->rwsem);
1022 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1024 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1027 pr_err("%s: Failed to start governor\n", __func__);
1035 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1037 struct device *dev = get_cpu_device(cpu);
1038 struct cpufreq_policy *policy;
1043 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1047 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1048 goto err_free_policy;
1050 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1051 goto err_free_cpumask;
1053 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1054 goto err_free_rcpumask;
1056 kobject_init(&policy->kobj, &ktype_cpufreq);
1057 INIT_LIST_HEAD(&policy->policy_list);
1058 init_rwsem(&policy->rwsem);
1059 spin_lock_init(&policy->transition_lock);
1060 init_waitqueue_head(&policy->transition_wait);
1061 init_completion(&policy->kobj_unregister);
1062 INIT_WORK(&policy->update, handle_update);
1068 free_cpumask_var(policy->related_cpus);
1070 free_cpumask_var(policy->cpus);
1077 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1079 struct kobject *kobj;
1080 struct completion *cmp;
1083 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1084 CPUFREQ_REMOVE_POLICY, policy);
1086 down_write(&policy->rwsem);
1087 cpufreq_remove_dev_symlink(policy);
1088 kobj = &policy->kobj;
1089 cmp = &policy->kobj_unregister;
1090 up_write(&policy->rwsem);
1094 * We need to make sure that the underlying kobj is
1095 * actually not referenced anymore by anybody before we
1096 * proceed with unloading.
1098 pr_debug("waiting for dropping of refcount\n");
1099 wait_for_completion(cmp);
1100 pr_debug("wait complete\n");
1103 static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1105 unsigned long flags;
1108 /* Remove policy from list */
1109 write_lock_irqsave(&cpufreq_driver_lock, flags);
1110 list_del(&policy->policy_list);
1112 for_each_cpu(cpu, policy->related_cpus)
1113 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1114 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1116 cpufreq_policy_put_kobj(policy, notify);
1117 free_cpumask_var(policy->real_cpus);
1118 free_cpumask_var(policy->related_cpus);
1119 free_cpumask_var(policy->cpus);
1123 static int cpufreq_online(unsigned int cpu)
1125 struct cpufreq_policy *policy;
1127 unsigned long flags;
1131 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1133 /* Check if this CPU already has a policy to manage it */
1134 policy = per_cpu(cpufreq_cpu_data, cpu);
1136 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1137 if (!policy_is_inactive(policy))
1138 return cpufreq_add_policy_cpu(policy, cpu);
1140 /* This is the only online CPU for the policy. Start over. */
1142 down_write(&policy->rwsem);
1144 policy->governor = NULL;
1145 up_write(&policy->rwsem);
1148 policy = cpufreq_policy_alloc(cpu);
1153 cpumask_copy(policy->cpus, cpumask_of(cpu));
1155 /* call driver. From then on the cpufreq must be able
1156 * to accept all calls to ->verify and ->setpolicy for this CPU
1158 ret = cpufreq_driver->init(policy);
1160 pr_debug("initialization failed\n");
1161 goto out_free_policy;
1164 down_write(&policy->rwsem);
1167 /* related_cpus should at least include policy->cpus. */
1168 cpumask_copy(policy->related_cpus, policy->cpus);
1169 /* Remember CPUs present at the policy creation time. */
1170 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1172 /* Name and add the kobject */
1173 ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
1175 cpumask_first(policy->related_cpus));
1177 pr_err("%s: failed to add policy->kobj: %d\n", __func__,
1179 goto out_exit_policy;
1184 * affected cpus must always be the ones which are online. We aren't
1185 * managing offline cpus here.
1187 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1190 policy->user_policy.min = policy->min;
1191 policy->user_policy.max = policy->max;
1193 write_lock_irqsave(&cpufreq_driver_lock, flags);
1194 for_each_cpu(j, policy->related_cpus)
1195 per_cpu(cpufreq_cpu_data, j) = policy;
1196 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1198 policy->min = policy->user_policy.min;
1199 policy->max = policy->user_policy.max;
1202 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1203 policy->cur = cpufreq_driver->get(policy->cpu);
1205 pr_err("%s: ->get() failed\n", __func__);
1206 goto out_exit_policy;
1211 * Sometimes boot loaders set the CPU frequency to a value outside of the
1212 * frequency table known to the cpufreq core. In such cases the CPU might be
1213 * unstable if it has to run at that frequency for a long time, so it's
1214 * better to set it to a frequency which is specified in the freq table.
1215 * Running at an unlisted frequency also makes cpufreq stats inconsistent, as
1216 * cpufreq-stats would fail to register because the current frequency of the
1217 * CPU isn't found in the freq table.
1219 * Because we don't want this change to affect the boot process badly, we go
1220 * for the next freq which is >= policy->cur ('cur' must be set by now,
1221 * otherwise we will end up setting the freq to the lowest in the table, as
1222 * 'cur' is initialized to zero).
1224 * We are passing target-freq as "policy->cur - 1" otherwise
1225 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1226 * equal to target-freq.
1228 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1230 /* Are we running at unknown frequency ? */
1231 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1232 if (ret == -EINVAL) {
1233 /* Warn user and fix it */
1234 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1235 __func__, policy->cpu, policy->cur);
1236 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1237 CPUFREQ_RELATION_L);
1240 * Reaching here within a few seconds after boot does not
1241 * mean that the system will remain stable at this "unknown"
1242 * frequency for a longer duration. Hence, a BUG_ON().
1245 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1246 __func__, policy->cpu, policy->cur);
1250 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1251 CPUFREQ_START, policy);
1254 ret = cpufreq_add_dev_interface(policy);
1256 goto out_exit_policy;
1257 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1258 CPUFREQ_CREATE_POLICY, policy);
1260 write_lock_irqsave(&cpufreq_driver_lock, flags);
1261 list_add(&policy->policy_list, &cpufreq_policy_list);
1262 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1265 ret = cpufreq_init_policy(policy);
1267 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1268 __func__, cpu, ret);
1269 /* cpufreq_policy_free() will notify based on this */
1271 goto out_exit_policy;
1274 up_write(&policy->rwsem);
1276 kobject_uevent(&policy->kobj, KOBJ_ADD);
1278 /* Callback for handling stuff after policy is ready */
1279 if (cpufreq_driver->ready)
1280 cpufreq_driver->ready(policy);
1282 pr_debug("initialization complete\n");
1287 up_write(&policy->rwsem);
1289 if (cpufreq_driver->exit)
1290 cpufreq_driver->exit(policy);
1292 cpufreq_policy_free(policy, !new_policy);
1297 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1299 * @sif: Subsystem interface structure pointer (not used)
1301 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1303 unsigned cpu = dev->id;
1306 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1308 if (cpu_online(cpu)) {
1309 ret = cpufreq_online(cpu);
1312 * A hotplug notifier will follow and we will handle it as CPU
1313 * online then. For now, just create the sysfs link, unless
1314 * there is no policy or the link is already present.
1316 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1318 ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1319 ? add_cpu_dev_symlink(policy, cpu) : 0;
1325 static void cpufreq_offline_prepare(unsigned int cpu)
1327 struct cpufreq_policy *policy;
1329 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1331 policy = cpufreq_cpu_get_raw(cpu);
1333 pr_debug("%s: No cpu_data found\n", __func__);
1338 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1340 pr_err("%s: Failed to stop governor\n", __func__);
1343 down_write(&policy->rwsem);
1344 cpumask_clear_cpu(cpu, policy->cpus);
1346 if (policy_is_inactive(policy)) {
1348 strncpy(policy->last_governor, policy->governor->name,
1351 policy->last_policy = policy->policy;
1352 } else if (cpu == policy->cpu) {
1353 /* Nominate new CPU */
1354 policy->cpu = cpumask_any(policy->cpus);
1356 up_write(&policy->rwsem);
1358 /* Start governor again for active policy */
1359 if (!policy_is_inactive(policy)) {
1361 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1363 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1366 pr_err("%s: Failed to start governor\n", __func__);
1368 } else if (cpufreq_driver->stop_cpu) {
1369 cpufreq_driver->stop_cpu(policy);
1373 static void cpufreq_offline_finish(unsigned int cpu)
1375 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1378 pr_debug("%s: No cpu_data found\n", __func__);
1382 /* Only proceed for inactive policies */
1383 if (!policy_is_inactive(policy))
1386 /* If cpu is last user of policy, free policy */
1388 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1390 pr_err("%s: Failed to exit governor\n", __func__);
1394 * Perform the ->exit() even during light-weight tear-down,
1395 * since this is a core component, and is essential for the
1396 * subsequent light-weight ->init() to succeed.
1398 if (cpufreq_driver->exit) {
1399 cpufreq_driver->exit(policy);
1400 policy->freq_table = NULL;
1405 * cpufreq_remove_dev - remove a CPU device
1407 * Removes the cpufreq interface for a CPU device.
1409 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1411 unsigned int cpu = dev->id;
1412 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1417 if (cpu_online(cpu)) {
1418 cpufreq_offline_prepare(cpu);
1419 cpufreq_offline_finish(cpu);
1422 cpumask_clear_cpu(cpu, policy->real_cpus);
1423 remove_cpu_dev_symlink(policy, cpu);
1425 if (cpumask_empty(policy->real_cpus))
1426 cpufreq_policy_free(policy, true);
1429 static void handle_update(struct work_struct *work)
1431 struct cpufreq_policy *policy =
1432 container_of(work, struct cpufreq_policy, update);
1433 unsigned int cpu = policy->cpu;
1434 pr_debug("handle_update for cpu %u called\n", cpu);
1435 cpufreq_update_policy(cpu);
1439 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1441 * @policy: policy managing CPUs
1442 * @new_freq: CPU frequency the CPU actually runs at
1444 * We adjust to current frequency first, and need to clean up later.
1445 * So either call cpufreq_update_policy() or schedule handle_update().
1447 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1448 unsigned int new_freq)
1450 struct cpufreq_freqs freqs;
1452 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1453 policy->cur, new_freq);
1455 freqs.old = policy->cur;
1456 freqs.new = new_freq;
1458 cpufreq_freq_transition_begin(policy, &freqs);
1459 cpufreq_freq_transition_end(policy, &freqs, 0);
1463 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1466 * This is the last known freq, without actually getting it from the driver.
1467 * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1469 unsigned int cpufreq_quick_get(unsigned int cpu)
1471 struct cpufreq_policy *policy;
1472 unsigned int ret_freq = 0;
1474 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1475 return cpufreq_driver->get(cpu);
1477 policy = cpufreq_cpu_get(cpu);
1479 ret_freq = policy->cur;
1480 cpufreq_cpu_put(policy);
1485 EXPORT_SYMBOL(cpufreq_quick_get);
1488 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1491 * Just return the max possible frequency for a given CPU.
1493 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1495 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1496 unsigned int ret_freq = 0;
1499 ret_freq = policy->max;
1500 cpufreq_cpu_put(policy);
1505 EXPORT_SYMBOL(cpufreq_quick_get_max);
1507 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1509 unsigned int ret_freq = 0;
1511 if (!cpufreq_driver->get)
1514 ret_freq = cpufreq_driver->get(policy->cpu);
1516 /* Updating inactive policies is invalid, so avoid doing that. */
1517 if (unlikely(policy_is_inactive(policy)))
1520 if (ret_freq && policy->cur &&
1521 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1522 /* verify no discrepancy between actual and
1523 saved value exists */
1524 if (unlikely(ret_freq != policy->cur)) {
1525 cpufreq_out_of_sync(policy, ret_freq);
1526 schedule_work(&policy->update);
1534 * cpufreq_get - get the current CPU frequency (in kHz)
1537 * Get the current CPU frequency, as reported by the driver.
1539 unsigned int cpufreq_get(unsigned int cpu)
1541 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1542 unsigned int ret_freq = 0;
1545 down_read(&policy->rwsem);
1546 ret_freq = __cpufreq_get(policy);
1547 up_read(&policy->rwsem);
1549 cpufreq_cpu_put(policy);
1554 EXPORT_SYMBOL(cpufreq_get);
1556 static struct subsys_interface cpufreq_interface = {
1558 .subsys = &cpu_subsys,
1559 .add_dev = cpufreq_add_dev,
1560 .remove_dev = cpufreq_remove_dev,
1564 * In case the platform wants some specific frequency to be configured during suspend.
1567 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1571 if (!policy->suspend_freq) {
1572 pr_debug("%s: suspend_freq not defined\n", __func__);
1576 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1577 policy->suspend_freq);
1579 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1580 CPUFREQ_RELATION_H);
1582 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1583 __func__, policy->suspend_freq, ret);
1587 EXPORT_SYMBOL(cpufreq_generic_suspend);
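/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * guarded by "#if 0"): a hypothetical driver picking a fixed frequency to run
 * at across suspend and reusing cpufreq_generic_suspend() as its ->suspend()
 * callback. The 800000 kHz value is made up.
 */
#if 0
static int example_suspend_init(struct cpufreq_policy *policy)
{
        /* normal table/latency setup would go here */
        policy->suspend_freq = 800000;          /* kHz */
        return 0;
}

static struct cpufreq_driver example_suspend_driver = {
        .name           = "example-suspend",
        .init           = example_suspend_init,
        .suspend        = cpufreq_generic_suspend,
        /* remaining mandatory callbacks omitted for brevity */
};
#endif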
1590 * cpufreq_suspend() - Suspend CPUFreq governors
1592 * Called during system-wide Suspend/Hibernate cycles to suspend governors,
1593 * as some platforms can't change frequency after this point in the suspend
1594 * cycle, because some of the devices (like i2c, regulators, etc.) they use
1595 * for changing frequency are suspended soon after this point.
1597 void cpufreq_suspend(void)
1599 struct cpufreq_policy *policy;
1601 if (!cpufreq_driver)
1607 pr_debug("%s: Suspending Governors\n", __func__);
1609 for_each_active_policy(policy) {
1610 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1611 pr_err("%s: Failed to stop governor for policy: %p\n",
1613 else if (cpufreq_driver->suspend
1614 && cpufreq_driver->suspend(policy))
1615 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1620 cpufreq_suspended = true;
1624 * cpufreq_resume() - Resume CPUFreq governors
1626 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1627 * are suspended with cpufreq_suspend().
1629 void cpufreq_resume(void)
1631 struct cpufreq_policy *policy;
1633 if (!cpufreq_driver)
1636 if (unlikely(!cpufreq_suspended))
1639 cpufreq_suspended = false;
1644 pr_debug("%s: Resuming Governors\n", __func__);
1646 for_each_active_policy(policy) {
1647 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1648 pr_err("%s: Failed to resume driver: %p\n", __func__,
1650 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1651 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1652 pr_err("%s: Failed to start governor for policy: %p\n",
1657 * Schedule cpufreq_update_policy() for the first online CPU, as that
1658 * one wouldn't be hotplugged out on suspend. It will verify that the
1659 * current freq is in sync with what we believe it to be.
1661 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1662 if (WARN_ON(!policy))
1665 schedule_work(&policy->update);
1669 * cpufreq_get_current_driver - return current driver's name
1671 * Return the name string of the currently loaded cpufreq driver
1674 const char *cpufreq_get_current_driver(void)
1677 return cpufreq_driver->name;
1681 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1684 * cpufreq_get_driver_data - return current driver data
1686 * Return the private data of the currently loaded cpufreq
1687 * driver, or NULL if no cpufreq driver is loaded.
1689 void *cpufreq_get_driver_data(void)
1692 return cpufreq_driver->driver_data;
1696 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1698 /*********************************************************************
1699 * NOTIFIER LISTS INTERFACE *
1700 *********************************************************************/
1703 * cpufreq_register_notifier - register a driver with cpufreq
1704 * @nb: notifier function to register
1705 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1707 * Add a driver to one of two lists: either a list of drivers that
1708 * are notified about clock rate changes (once before and once after
1709 * the transition), or a list of drivers that are notified about
1710 * changes in cpufreq policy.
1712 * This function may sleep, and has the same return conditions as
1713 * blocking_notifier_chain_register.
1715 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1719 if (cpufreq_disabled())
1722 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1725 case CPUFREQ_TRANSITION_NOTIFIER:
1726 ret = srcu_notifier_chain_register(
1727 &cpufreq_transition_notifier_list, nb);
1729 case CPUFREQ_POLICY_NOTIFIER:
1730 ret = blocking_notifier_chain_register(
1731 &cpufreq_policy_notifier_list, nb);
1739 EXPORT_SYMBOL(cpufreq_register_notifier);
1742 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1743 * @nb: notifier block to be unregistered
1744 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1746 * Remove a driver from the CPU frequency notifier list.
1748 * This function may sleep, and has the same return conditions as
1749 * blocking_notifier_chain_unregister.
1751 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1755 if (cpufreq_disabled())
1759 case CPUFREQ_TRANSITION_NOTIFIER:
1760 ret = srcu_notifier_chain_unregister(
1761 &cpufreq_transition_notifier_list, nb);
1763 case CPUFREQ_POLICY_NOTIFIER:
1764 ret = blocking_notifier_chain_unregister(
1765 &cpufreq_policy_notifier_list, nb);
1773 EXPORT_SYMBOL(cpufreq_unregister_notifier);
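/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * guarded by "#if 0"): a transition notifier as described above. The callback
 * is invoked once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE for
 * every frequency change; all names below are hypothetical.
 */
#if 0
static int example_transition_cb(struct notifier_block *nb,
                                 unsigned long event, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (event == CPUFREQ_POSTCHANGE)
                pr_debug("CPU%u switched from %u kHz to %u kHz\n",
                         freqs->cpu, freqs->old, freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
        .notifier_call = example_transition_cb,
};

static int __init example_notifier_init(void)
{
        return cpufreq_register_notifier(&example_transition_nb,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
#endif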
1776 /*********************************************************************
1778 *********************************************************************/
1780 /* Must set freqs->new to intermediate frequency */
1781 static int __target_intermediate(struct cpufreq_policy *policy,
1782 struct cpufreq_freqs *freqs, int index)
1786 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1788 /* We don't need to switch to intermediate freq */
1792 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1793 __func__, policy->cpu, freqs->old, freqs->new);
1795 cpufreq_freq_transition_begin(policy, freqs);
1796 ret = cpufreq_driver->target_intermediate(policy, index);
1797 cpufreq_freq_transition_end(policy, freqs, ret);
1800 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1806 static int __target_index(struct cpufreq_policy *policy,
1807 struct cpufreq_frequency_table *freq_table, int index)
1809 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1810 unsigned int intermediate_freq = 0;
1811 int retval = -EINVAL;
1814 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1816 /* Handle switching to intermediate frequency */
1817 if (cpufreq_driver->get_intermediate) {
1818 retval = __target_intermediate(policy, &freqs, index);
1822 intermediate_freq = freqs.new;
1823 /* Set old freq to intermediate */
1824 if (intermediate_freq)
1825 freqs.old = freqs.new;
1828 freqs.new = freq_table[index].frequency;
1829 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1830 __func__, policy->cpu, freqs.old, freqs.new);
1832 cpufreq_freq_transition_begin(policy, &freqs);
1835 retval = cpufreq_driver->target_index(policy, index);
1837 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1841 cpufreq_freq_transition_end(policy, &freqs, retval);
1844 * Failed after setting to intermediate freq? Driver should have
1845 * reverted back to initial frequency and so should we. Check
1846 * here for intermediate_freq instead of get_intermediate, in
1847 * case we haven't switched to intermediate freq at all.
1849 if (unlikely(retval && intermediate_freq)) {
1850 freqs.old = intermediate_freq;
1851 freqs.new = policy->restore_freq;
1852 cpufreq_freq_transition_begin(policy, &freqs);
1853 cpufreq_freq_transition_end(policy, &freqs, 0);
1860 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1861 unsigned int target_freq,
1862 unsigned int relation)
1864 unsigned int old_target_freq = target_freq;
1865 int retval = -EINVAL;
1867 if (cpufreq_disabled())
1870 /* Make sure that target_freq is within supported range */
1871 if (target_freq > policy->max)
1872 target_freq = policy->max;
1873 if (target_freq < policy->min)
1874 target_freq = policy->min;
1876 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1877 policy->cpu, target_freq, relation, old_target_freq);
1880 * This might look like a redundant call as we are checking it again
1881 * after finding the index. But it is left in intentionally for cases where
1882 * exactly the same freq is requested again, so we can save a few function calls.
1885 if (target_freq == policy->cur)
1888 /* Save last value to restore later on errors */
1889 policy->restore_freq = policy->cur;
1891 if (cpufreq_driver->target)
1892 retval = cpufreq_driver->target(policy, target_freq, relation);
1893 else if (cpufreq_driver->target_index) {
1894 struct cpufreq_frequency_table *freq_table;
1897 freq_table = cpufreq_frequency_get_table(policy->cpu);
1898 if (unlikely(!freq_table)) {
1899 pr_err("%s: Unable to find freq_table\n", __func__);
1903 retval = cpufreq_frequency_table_target(policy, freq_table,
1904 target_freq, relation, &index);
1905 if (unlikely(retval)) {
1906 pr_err("%s: Unable to find matching freq\n", __func__);
1910 if (freq_table[index].frequency == policy->cur) {
1915 retval = __target_index(policy, freq_table, index);
1921 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1923 int cpufreq_driver_target(struct cpufreq_policy *policy,
1924 unsigned int target_freq,
1925 unsigned int relation)
1929 down_write(&policy->rwsem);
1931 ret = __cpufreq_driver_target(policy, target_freq, relation);
1933 up_write(&policy->rwsem);
1937 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
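/*
 * Editor's note: this locked wrapper is what governors normally call. For
 * example (value made up), requesting the lowest table frequency at or above
 * 800 MHz for the CPUs covered by a policy would look like:
 *
 *	ret = cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_L);
 */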
1939 static int __cpufreq_governor(struct cpufreq_policy *policy,
1944 /* Must only be defined when the default governor is known to have latency
1945 restrictions, e.g. conservative or ondemand.
1946 That this is the case is already ensured in Kconfig
1948 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1949 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1951 struct cpufreq_governor *gov = NULL;
1954 /* Don't start any governor operations if we are entering suspend */
1955 if (cpufreq_suspended)
1958 * The governor might not be initialized here if an ACPI _PPC change
1959 * notification happened, so check for it.
1961 if (!policy->governor)
1964 if (policy->governor->max_transition_latency &&
1965 policy->cpuinfo.transition_latency >
1966 policy->governor->max_transition_latency) {
1970 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
1971 policy->governor->name, gov->name);
1972 policy->governor = gov;
1976 if (event == CPUFREQ_GOV_POLICY_INIT)
1977 if (!try_module_get(policy->governor->owner))
1980 pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
1982 mutex_lock(&cpufreq_governor_lock);
1983 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1984 || (!policy->governor_enabled
1985 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1986 mutex_unlock(&cpufreq_governor_lock);
1990 if (event == CPUFREQ_GOV_STOP)
1991 policy->governor_enabled = false;
1992 else if (event == CPUFREQ_GOV_START)
1993 policy->governor_enabled = true;
1995 mutex_unlock(&cpufreq_governor_lock);
1997 ret = policy->governor->governor(policy, event);
2000 if (event == CPUFREQ_GOV_POLICY_INIT)
2001 policy->governor->initialized++;
2002 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2003 policy->governor->initialized--;
2005 /* Restore original values */
2006 mutex_lock(&cpufreq_governor_lock);
2007 if (event == CPUFREQ_GOV_STOP)
2008 policy->governor_enabled = true;
2009 else if (event == CPUFREQ_GOV_START)
2010 policy->governor_enabled = false;
2011 mutex_unlock(&cpufreq_governor_lock);
2014 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2015 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2016 module_put(policy->governor->owner);
2021 int cpufreq_register_governor(struct cpufreq_governor *governor)
2028 if (cpufreq_disabled())
2031 mutex_lock(&cpufreq_governor_mutex);
2033 governor->initialized = 0;
2035 if (!find_governor(governor->name)) {
2037 list_add(&governor->governor_list, &cpufreq_governor_list);
2040 mutex_unlock(&cpufreq_governor_mutex);
2043 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2045 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2047 struct cpufreq_policy *policy;
2048 unsigned long flags;
2053 if (cpufreq_disabled())
2056 /* clear last_governor for all inactive policies */
2057 read_lock_irqsave(&cpufreq_driver_lock, flags);
2058 for_each_inactive_policy(policy) {
2059 if (!strcmp(policy->last_governor, governor->name)) {
2060 policy->governor = NULL;
2061 strcpy(policy->last_governor, "\0");
2064 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2066 mutex_lock(&cpufreq_governor_mutex);
2067 list_del(&governor->governor_list);
2068 mutex_unlock(&cpufreq_governor_mutex);
2071 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
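/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * guarded by "#if 0"): the skeleton of a governor registered through the
 * interface above. The handler and names are hypothetical; a real governor
 * reacts to the CPUFREQ_GOV_POLICY_INIT/EXIT, _START/_STOP and _LIMITS events.
 */
#if 0
static int example_governor_handler(struct cpufreq_policy *policy,
                                    unsigned int event)
{
        /* dispatch on the CPUFREQ_GOV_* event here */
        return 0;
}

static struct cpufreq_governor example_governor = {
        .name           = "example",
        .governor       = example_governor_handler,
        .owner          = THIS_MODULE,
};

static int __init example_governor_init(void)
{
        return cpufreq_register_governor(&example_governor);
}
#endif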
2074 /*********************************************************************
2075 * POLICY INTERFACE *
2076 *********************************************************************/
2079 * cpufreq_get_policy - get the current cpufreq_policy
2080 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
2083 * Reads the current cpufreq policy.
2085 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2087 struct cpufreq_policy *cpu_policy;
2091 cpu_policy = cpufreq_cpu_get(cpu);
2095 memcpy(policy, cpu_policy, sizeof(*policy));
2097 cpufreq_cpu_put(cpu_policy);
2100 EXPORT_SYMBOL(cpufreq_get_policy);
2103 * policy : current policy.
2104 * new_policy: policy to be set.
2106 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2107 struct cpufreq_policy *new_policy)
2109 struct cpufreq_governor *old_gov;
2112 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2113 new_policy->cpu, new_policy->min, new_policy->max);
2115 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2118 * This check works well when we store new min/max freq attributes,
2119 * because new_policy is a copy of policy with one field updated.
2121 if (new_policy->min > new_policy->max)
2124 /* verify the cpu speed can be set within this limit */
2125 ret = cpufreq_driver->verify(new_policy);
2129 /* adjust if necessary - all reasons */
2130 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2131 CPUFREQ_ADJUST, new_policy);
2134 * verify the cpu speed can be set within this limit, which might be
2135 * different from the first one
2137 ret = cpufreq_driver->verify(new_policy);
2141 /* notification of the new policy */
2142 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2143 CPUFREQ_NOTIFY, new_policy);
2145 policy->min = new_policy->min;
2146 policy->max = new_policy->max;
2148 pr_debug("new min and max freqs are %u - %u kHz\n",
2149 policy->min, policy->max);
2151 if (cpufreq_driver->setpolicy) {
2152 policy->policy = new_policy->policy;
2153 pr_debug("setting range\n");
2154 return cpufreq_driver->setpolicy(new_policy);
2157 if (new_policy->governor == policy->governor)
2160 pr_debug("governor switch\n");
2162 /* save old, working values */
2163 old_gov = policy->governor;
2164 /* end old governor */
2166 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2168 /* This can happen due to race with other operations */
2169 pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2170 __func__, old_gov->name, ret);
2174 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2176 pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2177 __func__, old_gov->name, ret);
2182 /* start new governor */
2183 policy->governor = new_policy->governor;
2184 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2186 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
2190 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2193 /* new governor failed, so re-start old one */
2194 pr_debug("starting governor %s failed\n", policy->governor->name);
2196 policy->governor = old_gov;
2197 if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2198 policy->governor = NULL;
2200 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2206 pr_debug("governor: change or update limits\n");
2207 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2211 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2212 * @cpu: CPU which shall be re-evaluated
2214 * Useful for policy notifiers which have different necessities
2215 * at different times.
2217 int cpufreq_update_policy(unsigned int cpu)
2219 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2220 struct cpufreq_policy new_policy;
2226 down_write(&policy->rwsem);
2228 pr_debug("updating policy for CPU %u\n", cpu);
2229 memcpy(&new_policy, policy, sizeof(*policy));
2230 new_policy.min = policy->user_policy.min;
2231 new_policy.max = policy->user_policy.max;
2234 * BIOS might change freq behind our back
2235 * -> ask driver for current freq and notify governors about a change
2237 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2238 new_policy.cur = cpufreq_driver->get(cpu);
2239 if (WARN_ON(!new_policy.cur)) {
2245 pr_debug("Driver did not initialize current freq\n");
2246 policy->cur = new_policy.cur;
2248 if (policy->cur != new_policy.cur && has_target())
2249 cpufreq_out_of_sync(policy, new_policy.cur);
2253 ret = cpufreq_set_policy(policy, &new_policy);
2256 up_write(&policy->rwsem);
2258 cpufreq_cpu_put(policy);
2261 EXPORT_SYMBOL(cpufreq_update_policy);
2263 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2264 unsigned long action, void *hcpu)
2266 unsigned int cpu = (unsigned long)hcpu;
2268 switch (action & ~CPU_TASKS_FROZEN) {
2270 cpufreq_online(cpu);
2273 case CPU_DOWN_PREPARE:
2274 cpufreq_offline_prepare(cpu);
2278 cpufreq_offline_finish(cpu);
2281 case CPU_DOWN_FAILED:
2282 cpufreq_online(cpu);
2288 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2289 .notifier_call = cpufreq_cpu_callback,
2292 /*********************************************************************
2294 *********************************************************************/
2295 static int cpufreq_boost_set_sw(int state)
2297 struct cpufreq_frequency_table *freq_table;
2298 struct cpufreq_policy *policy;
2301 for_each_active_policy(policy) {
2302 freq_table = cpufreq_frequency_get_table(policy->cpu);
2304 ret = cpufreq_frequency_table_cpuinfo(policy,
2307 pr_err("%s: Policy frequency update failed\n",
2311 policy->user_policy.max = policy->max;
2312 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2319 int cpufreq_boost_trigger_state(int state)
2321 unsigned long flags;
2324 if (cpufreq_driver->boost_enabled == state)
2327 write_lock_irqsave(&cpufreq_driver_lock, flags);
2328 cpufreq_driver->boost_enabled = state;
2329 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2331 ret = cpufreq_driver->set_boost(state);
2333 write_lock_irqsave(&cpufreq_driver_lock, flags);
2334 cpufreq_driver->boost_enabled = !state;
2335 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2337 pr_err("%s: Cannot %s BOOST\n",
2338 __func__, state ? "enable" : "disable");
2344 int cpufreq_boost_supported(void)
2346 if (likely(cpufreq_driver))
2347 return cpufreq_driver->boost_supported;
2351 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2353 static int create_boost_sysfs_file(void)
2357 if (!cpufreq_boost_supported())
2361 * Check if driver provides function to enable boost -
2362 * if not, use cpufreq_boost_set_sw as default
2364 if (!cpufreq_driver->set_boost)
2365 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2367 ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2369 pr_err("%s: cannot register global BOOST sysfs file\n",
2375 static void remove_boost_sysfs_file(void)
2377 if (cpufreq_boost_supported())
2378 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2381 int cpufreq_enable_boost_support(void)
2383 if (!cpufreq_driver)
2386 if (cpufreq_boost_supported())
2389 cpufreq_driver->boost_supported = true;
2391 /* This will get removed on driver unregister */
2392 return create_boost_sysfs_file();
2394 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
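/*
 * Editor's note: a driver that discovers boost (over-clocked) frequencies at
 * runtime would typically call cpufreq_enable_boost_support() from its init
 * path, e.g.:
 *
 *	ret = cpufreq_enable_boost_support();
 *
 * The core then exposes the global "boost" sysfs attribute and falls back to
 * cpufreq_boost_set_sw() unless the driver supplies ->set_boost().
 */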
2396 int cpufreq_boost_enabled(void)
2398 return cpufreq_driver->boost_enabled;
2400 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2402 /*********************************************************************
2403 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2404 *********************************************************************/
2407 * cpufreq_register_driver - register a CPU Frequency driver
2408 * @driver_data: A struct cpufreq_driver containing the values
2409 * submitted by the CPU Frequency driver.
2411 * Registers a CPU Frequency driver to this core code. This code
2412 * returns zero on success, -EBUSY when another driver got here first
2413 * (and isn't unregistered in the meantime).
2416 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2418 unsigned long flags;
2421 if (cpufreq_disabled())
2425 * The cpufreq core depends heavily on the availability of device
2426 * structures; make sure they are available before proceeding further.
2428 if (!get_cpu_device(0))
2429 return -EPROBE_DEFER;
2431 if (!driver_data || !driver_data->verify || !driver_data->init ||
2432 !(driver_data->setpolicy || driver_data->target_index ||
2433 driver_data->target) ||
2434 (driver_data->setpolicy && (driver_data->target_index ||
2435 driver_data->target)) ||
2436 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2439 pr_debug("trying to register driver %s\n", driver_data->name);
2441 /* Protect against concurrent CPU online/offline. */
2444 write_lock_irqsave(&cpufreq_driver_lock, flags);
2445 if (cpufreq_driver) {
2446 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2450 cpufreq_driver = driver_data;
2451 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2453 if (driver_data->setpolicy)
2454 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2456 ret = create_boost_sysfs_file();
2458 goto err_null_driver;
2460 ret = subsys_interface_register(&cpufreq_interface);
2462 goto err_boost_unreg;
2464 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2465 list_empty(&cpufreq_policy_list)) {
2466 /* if all ->init() calls failed, unregister */
2468 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2473 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2474 pr_debug("driver %s up and running\n", driver_data->name);
2481 subsys_interface_unregister(&cpufreq_interface);
2483 remove_boost_sysfs_file();
2485 write_lock_irqsave(&cpufreq_driver_lock, flags);
2486 cpufreq_driver = NULL;
2487 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2490 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
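/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * guarded by "#if 0"): the minimal shape of a ->target_index style driver
 * registered through cpufreq_register_driver(). ->verify and ->init are
 * mandatory, and exactly one of ->setpolicy, ->target or ->target_index must
 * be provided (see the checks above). The table, the callbacks and the use of
 * the generic helpers cpufreq_generic_frequency_table_verify() and
 * cpufreq_generic_attr are illustrative assumptions.
 */
#if 0
static struct cpufreq_frequency_table example_drv_table[] = {
        { .frequency = 400000 },                /* kHz */
        { .frequency = 800000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_drv_init(struct cpufreq_policy *policy)
{
        return cpufreq_generic_init(policy, example_drv_table, 300 * 1000);
}

static int example_drv_target_index(struct cpufreq_policy *policy,
                                    unsigned int index)
{
        /* program the hardware for example_drv_table[index].frequency here */
        return 0;
}

static struct cpufreq_driver example_driver = {
        .name           = "example",
        .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .verify         = cpufreq_generic_frequency_table_verify,
        .init           = example_drv_init,
        .target_index   = example_drv_target_index,
        .attr           = cpufreq_generic_attr,
};

static int __init example_driver_register(void)
{
        /* typically wired up via module_init() or a platform driver probe */
        return cpufreq_register_driver(&example_driver);
}
#endif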
2493 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2495 * Unregister the current CPUFreq driver. Only call this if you have
2496 * the right to do so, i.e. if you have succeeded in initialising before!
2497 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2498 * currently not initialised.
2500 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2502 unsigned long flags;
2504 if (!cpufreq_driver || (driver != cpufreq_driver))
2507 pr_debug("unregistering driver %s\n", driver->name);
2509 /* Protect against concurrent cpu hotplug */
2511 subsys_interface_unregister(&cpufreq_interface);
2512 remove_boost_sysfs_file();
2513 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2515 write_lock_irqsave(&cpufreq_driver_lock, flags);
2517 cpufreq_driver = NULL;
2519 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2524 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2526 struct kobject *cpufreq_global_kobject;
2527 EXPORT_SYMBOL(cpufreq_global_kobject);
2529 static int __init cpufreq_core_init(void)
2531 if (cpufreq_disabled())
2534 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2535 BUG_ON(!cpufreq_global_kobject);
2539 core_initcall(cpufreq_core_init);