/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"

#define SUGOV_KTHREAD_PRIORITY	50
struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy *policy;

        struct sugov_tunables *tunables;
        struct list_head tunables_hook;

        raw_spinlock_t update_lock;  /* For shared policies */
        u64 last_freq_update_time;
        s64 freq_update_delay_ns;
        unsigned int next_freq;
        unsigned int cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used. */
        struct irq_work irq_work;
        struct kthread_work work;
        struct mutex work_lock;
        struct kthread_worker worker;
        struct task_struct *thread;
        bool work_in_progress;

        bool need_freq_update;
};
struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy *sg_policy;
        unsigned int cpu;

        bool iowait_boost_pending;
        unsigned int iowait_boost;
        unsigned int iowait_boost_max;
        u64 last_update;

        /* The fields below are only needed when sharing a policy. */
        unsigned long util;
        unsigned long max;
        unsigned int flags;

        /* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/
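
/*
 * Overview (added note): each CPU owns a struct sugov_cpu whose update_util
 * hook is registered with the scheduler in sugov_start().  The scheduler then
 * calls back into sugov_update_single() or sugov_update_shared() from its
 * utilization update paths, which is where the logic below runs.
 */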
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-cpu data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-cpu
         * requests, so while get_next_freq() will work, our
         * sugov_update_commit() call may not for the fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * For the slow switching platforms, the kthread is always scheduled on
         * the right set of CPUs and any CPU can find the next frequency and
         * schedule the kthread.
         */
        if (sg_policy->policy->fast_switch_enabled &&
            !cpufreq_can_do_remote_dvfs(sg_policy->policy))
                return false;

        if (sg_policy->work_in_progress)
                return false;

        if (unlikely(sg_policy->need_freq_update)) {
                sg_policy->need_freq_update = false;
                /*
                 * This happens when limits change, so forget the previous
                 * next_freq value and force an update.
                 */
                sg_policy->next_freq = UINT_MAX;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;
        return delta_ns >= sg_policy->freq_update_delay_ns;
}
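
/*
 * Added note: commit a new frequency.  On fast-switch capable drivers the
 * change is made directly from this (scheduler) context via
 * cpufreq_driver_fast_switch(); otherwise work_in_progress is set and an
 * irq_work is queued to wake the slow-path kthread (see sugov_work()).
 */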
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
                                unsigned int next_freq)
{
        struct cpufreq_policy *policy = sg_policy->policy;

        if (sg_policy->next_freq == next_freq)
                return;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        if (policy->fast_switch_enabled) {
                next_freq = cpufreq_driver_fast_switch(policy, next_freq);
                if (!next_freq)
                        return;

                policy->cur = next_freq;
                trace_cpu_frequency(next_freq, smp_processor_id());
        } else {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = (freq + (freq >> 2)) * util / max;

        if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
                return sg_policy->next_freq;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}
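
/*
 * Worked example of the formula above (illustrative numbers only, not taken
 * from any particular platform): with frequency-invariant utilization,
 * max_freq = 2000 MHz, util = 512 and max = 1024 give a raw frequency of
 * 1.25 * 2000 * 512 / 1024 = 1250 MHz; cpufreq_driver_resolve_freq() then
 * maps that onto the lowest driver-supported frequency at or above it.
 */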
static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long cfs_max;

        cfs_max = arch_scale_cpu_capacity(NULL, cpu);

        *util = min(rq->cfs.avg.util_avg, cfs_max);
        *max = cfs_max;
}
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                                   unsigned int flags)
{
        if (flags & SCHED_CPUFREQ_IOWAIT) {
                if (sg_cpu->iowait_boost_pending)
                        return;

                sg_cpu->iowait_boost_pending = true;

                if (sg_cpu->iowait_boost) {
                        sg_cpu->iowait_boost <<= 1;
                        if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
                                sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
                } else {
                        sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
                }
        } else if (sg_cpu->iowait_boost) {
                s64 delta_ns = time - sg_cpu->last_update;

                /* Clear iowait_boost if the CPU appears to have been idle. */
                if (delta_ns > TICK_NSEC) {
                        sg_cpu->iowait_boost = 0;
                        sg_cpu->iowait_boost_pending = false;
                }
        }
}
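
/*
 * Added note: apply the current iowait boost to the (util, max) pair used for
 * frequency selection.  Unless a fresh boost is pending, each use halves the
 * boost, so it decays back below policy->min and is dropped once the iowait
 * pressure stops.
 */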
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
                               unsigned long *max)
{
        unsigned int boost_util, boost_max;

        if (!sg_cpu->iowait_boost)
                return;

        if (sg_cpu->iowait_boost_pending) {
                sg_cpu->iowait_boost_pending = false;
        } else {
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
                        sg_cpu->iowait_boost = 0;
                        return;
                }
        }

        boost_util = sg_cpu->iowait_boost;
        boost_max = sg_cpu->iowait_boost_max;

        if (*util * boost_max < *max * boost_util) {
                *util = boost_util;
                *max = boost_max;
        }
}
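
/*
 * Added note: with NO_HZ, a CPU is considered "busy" if it has not entered
 * the idle loop since the last time this check ran; the idle-call counter is
 * sampled and compared against the previously saved value.
 */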
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_set_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        busy = sugov_cpu_is_busy(sg_cpu);

        if (flags & SCHED_CPUFREQ_RT_DL) {
                next_f = policy->cpuinfo.max_freq;
        } else {
                sugov_get_util(&util, &max, sg_cpu->cpu);
                sugov_iowait_boost(sg_cpu, &util, &max);
                next_f = get_next_freq(sg_policy, util, max);
                /*
                 * Do not reduce the frequency if the CPU has not been idle
                 * recently, as the reduction is likely to be premature then.
                 */
                if (busy && next_f < sg_policy->next_freq &&
                    sg_policy->next_freq != UINT_MAX) {
                        next_f = sg_policy->next_freq;

                        /* Reset cached freq as next_freq has changed */
                        sg_policy->cached_raw_freq = 0;
                }
        }
        sugov_update_commit(sg_policy, time, next_f);
}
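
/*
 * Added note: for a shared policy, pick the (util, max) pair with the highest
 * util/max ratio among all CPUs in the policy and derive the next frequency
 * from it.  CPUs whose utilization data is stale (older than a tick) are
 * skipped, and any CPU running RT/DL work drives the policy straight to the
 * maximum frequency.
 */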
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;
                s64 delta_ns;

                /*
                 * If the CPU utilization was last updated before the previous
                 * frequency update and the time elapsed between the last update
                 * of the CPU utilization and the last frequency update is long
                 * enough, don't take the CPU into account as it probably is
                 * idle now (and clear iowait_boost for it).
                 */
                delta_ns = time - j_sg_cpu->last_update;
                if (delta_ns > TICK_NSEC) {
                        j_sg_cpu->iowait_boost = 0;
                        j_sg_cpu->iowait_boost_pending = false;
                        continue;
                }
                if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
                        return policy->cpuinfo.max_freq;

                j_util = j_sg_cpu->util;
                j_max = j_sg_cpu->max;
                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }

                sugov_iowait_boost(j_sg_cpu, &util, &max);
        }

        return get_next_freq(sg_policy, util, max);
}
static void sugov_update_shared(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long util, max;
        unsigned int next_f;

        sugov_get_util(&util, &max, sg_cpu->cpu);

        raw_spin_lock(&sg_policy->update_lock);

        sg_cpu->util = util;
        sg_cpu->max = max;
        sg_cpu->flags = flags;

        sugov_set_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        if (sugov_should_update_freq(sg_policy, time)) {
                if (flags & SCHED_CPUFREQ_RT_DL)
                        next_f = sg_policy->policy->cpuinfo.max_freq;
                else
                        next_f = sugov_next_freq_shared(sg_cpu, time);

                sugov_update_commit(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
}
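
/*
 * Added note: slow path.  This runs in the per-policy kthread, performs the
 * actual frequency change with __cpufreq_driver_target() under work_lock, and
 * then allows further frequency updates by clearing work_in_progress.
 */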
static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
                                CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);

        sg_policy->work_in_progress = false;
}
static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        /*
         * For RT and deadline tasks, the schedutil governor shoots the
         * frequency to maximum. Special care must be taken to ensure that this
         * kthread doesn't result in the same behavior.
         *
         * This is (mostly) guaranteed by the work_in_progress flag. The flag is
         * updated only at the end of the sugov_work() function and before that
         * the schedutil governor rejects all other frequency scaling requests.
         *
         * There is a very rare case though, where the RT thread yields right
         * after the work_in_progress flag is cleared. The effects of that are
         * neglected for now.
         */
        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/
static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
                                   size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
        &rate_limit_us.attr,
        NULL
};

static void sugov_tunables_free(struct kobject *kobj)
{
        struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);

        kfree(to_sugov_tunables(attr_set));
}

static struct kobj_type sugov_tunables_ktype = {
        .default_attrs = sugov_attributes,
        .sysfs_ops = &governor_sysfs_ops,
        .release = &sugov_tunables_free,
};

/********************** cpufreq governor interface *********************/
static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;

        /* Kthread is bound to all CPUs by default */
        if (!policy->dvfs_possible_from_any_cpu)
                kthread_bind_mask(thread, policy->related_cpus);

        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_clear_global_tunables(void)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;
}
static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_clear_global_tunables();

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}
static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_clear_global_tunables();

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}
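
/*
 * Added note: reset the per-policy and per-CPU state and register the
 * update_util hooks so the scheduler starts calling back into this governor;
 * shared policies get sugov_update_shared(), single-CPU policies get
 * sugov_update_single().
 */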
static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = UINT_MAX;
        sg_policy->work_in_progress = false;
        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
                sg_cpu->flags = SCHED_CPUFREQ_RT;
                sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
        }

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                                             policy_is_shared(policy) ?
                                                        sugov_update_shared :
                                                        sugov_update_single);
        }
        return 0;
}
static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_sched();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}
static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->need_freq_update = true;
}
static struct cpufreq_governor schedutil_gov = {
        .name = "schedutil",
        .owner = THIS_MODULE,
        .dynamic_switching = true,
        .init = sugov_init,
        .exit = sugov_exit,
        .start = sugov_start,
        .stop = sugov_stop,
        .limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
        return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);