// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
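/*
 * With the default SCHED_CAPACITY_SCALE of 1024, IOWAIT_BOOST_MIN evaluates
 * to 128, i.e. one eighth of a CPU's full capacity. This is the smallest
 * IO-wait boost ever applied and the value the boost restarts from.
 */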
struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct mutex		work_lock;
	struct kthread_worker	worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			limits_changed;
	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		util;
	unsigned long		bw_dl;
	unsigned long		max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}
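/*
 * Example (hypothetical numbers): with rate_limit_us = 2000, two scheduler
 * updates arriving 1.5 ms apart result in the second one being ignored,
 * unless limits_changed has been set in the meantime (by sugov_limits() or
 * by the DL bandwidth increase handled in ignore_dl_rate_limit()).
 */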
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->need_freq_update)
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	else if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}
static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	util = map_util_perf(util);
	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
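/*
 * Worked example with illustrative numbers: on a frequency-invariant system
 * with policy->cpuinfo.max_freq = 2000000 (kHz) and
 * max = SCHED_CAPACITY_SCALE = 1024, a utilization of util = 512 gives
 *
 *	map_util_perf(512) = 512 + 512/4 = 640
 *	map_util_freq(640, 2000000, 1024) = 2000000 * 640 / 1024 = 1250000
 *
 * i.e. next_freq = 1.25 * max_freq * util / max, which
 * cpufreq_driver_resolve_freq() then maps to a frequency the driver actually
 * supports, within the policy min/max limits.
 */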
static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);
	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), max,
					  FREQUENCY_UTIL, NULL);
}
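/*
 * Background note: effective_cpu_util() with FREQUENCY_UTIL aggregates the
 * CFS utilization with RT, DL and IRQ pressure and applies the uclamp
 * restrictions, so sg_cpu->util is a capacity-scale value suitable for
 * frequency selection. sg_cpu->bw_dl is kept separately so that a later
 * increase in deadline bandwidth can bypass the rate limit (see
 * ignore_dl_rate_limit()).
 */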
/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}
/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
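/*
 * Illustrative progression with the default SCHED_CAPACITY_SCALE of 1024:
 * successive IO wakeups arriving within a tick of each other grow the boost
 * as 128 -> 256 -> 512 -> 1024 (full capacity); a pause of more than a tick
 * drops it back to IOWAIT_BOOST_MIN, or clears it entirely if no new boost
 * is being requested, via sugov_iowait_reset().
 */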
/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset as well.
 *
 * This mechanism is designed to boost tasks which wait on IO frequently,
 * while being more conservative on tasks which do only sporadic IO
 * operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
	boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
	if (sg_cpu->util < boost)
		sg_cpu->util = boost;
}
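/*
 * Example (illustrative numbers): with iowait_boost = 512 on a CPU whose
 * capacity is sg_cpu->max = 800, the boost converted to capacity scale is
 * (512 * 800) >> 10 = 400; sg_cpu->util is then raised to 400 only if it is
 * currently below that value.
 */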
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_cpu->sg_policy->limits_changed = true;
}
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned int flags)
{
	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
		return false;

	sugov_get_util(sg_cpu);
	sugov_iowait_apply(sg_cpu, time);

	return true;
}
static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned int next_f;

	if (!sugov_update_single_common(sg_cpu, time, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 *
	 * Except when the rq is capped by uclamp_max.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}
static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	if (!sugov_update_single_common(sg_cpu, time, flags))
		return;

	/*
	 * Do not reduce the target performance level if the CPU has not been
	 * idle recently, as the reduction is likely to be premature then.
	 *
	 * Except when the rq is capped by uclamp_max.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;

	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
				   map_util_perf(sg_cpu->util), sg_cpu->max);

	sg_cpu->sg_policy->last_freq_update_time = time;
}
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		sugov_get_util(j_sg_cpu);
		sugov_iowait_apply(j_sg_cpu, time);
		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
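		/*
		 * Note: j_util / j_max is compared against util / max by
		 * cross-multiplying, which avoids a division; the CPU with
		 * the highest utilization relative to its capacity ends up
		 * determining the frequency for the whole policy.
		 */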
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);
}
static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to
	 * false below; without the lock we could miss queueing the new
	 * update.
	 *
	 * Note: if a work item was queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code;
	 * the request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}
static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}
static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}
static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}
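/*
 * The tunable above is exposed through sysfs. With per-policy governor
 * tunables the typical path is (layout may vary with kernel configuration):
 *
 *	/sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * Writing e.g. 2000 to it sets the minimum interval between frequency
 * updates for that policy to 2 ms.
 */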
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);
static void sugov_tunables_free(struct kobject *kobj)
{
	struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);

	kfree(to_sugov_tunables(attr_set));
}

static struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
	.release = &sugov_tunables_free,
};
/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline	= 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}

	return tunables;
}

static void sugov_clear_global_tunables(void)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;
}
static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_clear_global_tunables();

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}
static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_clear_global_tunables();

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}
static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns	= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->limits_changed = false;
	sg_policy->cached_raw_freq = 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
	}

	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}
static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}
static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->limits_changed = true;
}
struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}
}
#endif