// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_powerclamp.c - package c-state idle injection
 *
 * Copyright (c) 2012, Intel Corporation.
 *
 * Authors:
 *	Arjan van de Ven <arjan@linux.intel.com>
 *	Jacob Pan <jacob.jun.pan@linux.intel.com>
 *
 * TODO:
 *	1. better handle wakeup from external interrupts. Currently a fixed
 *	   compensation is added to the clamping duration when an excessive
 *	   number of wakeups is observed during idle time. The reason is that
 *	   in the case of external interrupts that need no ack, clamping down
 *	   the cpu in non-irq context does not reduce irqs. For the majority
 *	   of cases, clamping down the cpu does help reduce irqs as well; we
 *	   should be able to differentiate the two cases and give a
 *	   quantitative solution for the irqs we can control, perhaps based
 *	   on get_cpu_iowait_time_us().
 *
 *	2. synchronization with other hw blocks
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/cpu.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>

#include <asm/msr.h>
#include <asm/mwait.h>
#include <asm/cpu_device_id.h>
#include <asm/hardirq.h>
#define MAX_TARGET_RATIO (50U)
/* For each undisturbed clamping period (no extra wake ups during idle time),
 * we increment the confidence counter for the given target ratio.
 * CONFIDENCE_OK defines the level where runtime calibration results are
 * valid.
 */
#define CONFIDENCE_OK (3)
/* Default idle injection duration; the driver adjusts sleep time to meet the
 * target idle ratio. Similar to frequency modulation.
 */
#define DEFAULT_DURATION_JIFFIES (6)
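/*
 * A note on units: DEFAULT_DURATION_JIFFIES is converted with
 * jiffies_to_msecs() at init time, so the default duration in milliseconds
 * depends on CONFIG_HZ. For example, jiffies_to_msecs(6) yields 6 ms at
 * HZ=1000 but 24 ms at HZ=250.
 */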
static unsigned int target_mwait;
static struct dentry *debug_dir;
static bool poll_pkg_cstate_enable;

/* user selected target */
static unsigned int set_target_ratio;
static unsigned int current_ratio;
static bool should_skip;

static unsigned int control_cpu; /* The cpu assigned to collect stat and update
				  * control parameters. default to BSP but BSP
				  * can be offlined.
				  */

static bool clamping;
struct powerclamp_worker_data {
	struct kthread_worker *worker;
	struct kthread_work balancing_work;
	struct kthread_delayed_work idle_injection_work;
	unsigned int cpu;
	unsigned int count;
	unsigned int guard;
	unsigned int window_size_now;
	unsigned int target_ratio;
	unsigned int duration_jiffies;
	bool clamping;
};
static struct powerclamp_worker_data __percpu *worker_data;
static struct thermal_cooling_device *cooling_dev;
static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
					  * clamping kthread worker
					  */

static unsigned int duration;
static unsigned int pkg_cstate_ratio_cur;
static unsigned int window_size;
static int duration_set(const char *arg, const struct kernel_param *kp)
{
	unsigned long new_duration;
	int ret;

	ret = kstrtoul(arg, 10, &new_duration);
	if (ret)
		return ret;

	if (new_duration > 25 || new_duration < 6) {
		pr_err("Out of recommended range %lu, between 6-25ms\n",
		       new_duration);
		return -EINVAL;
	}

	duration = clamp(new_duration, 6ul, 25ul);
	/* make the new value visible to the clamping workers */
	smp_mb();

	return 0;
}
static const struct kernel_param_ops duration_ops = {
	.set = duration_set,
	.get = param_get_int,
};

module_param_cb(duration, &duration_ops, &duration, 0644);
MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");
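/*
 * Usage sketch (standard module-parameter sysfs path, not specific to this
 * file): with the module loaded, the duration can be changed at runtime,
 * e.g.
 *	echo 10 > /sys/module/intel_powerclamp/parameters/duration
 * requests 10 ms of forced idle per injection attempt; values outside 6-25
 * are rejected by duration_set() above.
 */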
struct powerclamp_calibration_data {
	unsigned long confidence;   /* used for calibration, basically a
				     * counter that gets incremented each time
				     * a clamping period completes without
				     * extra wakeups; once it reaches a given
				     * level, compensation is deemed usable.
				     */
	unsigned long steady_comp;  /* steady state compensation used when
				     * no extra wakeups occurred.
				     */
	unsigned long dynamic_comp; /* compensate excessive wakeup from idle,
				     * mostly from external interrupts.
				     */
};

static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO];
static int window_size_set(const char *arg, const struct kernel_param *kp)
{
	unsigned long new_window_size;
	int ret;

	ret = kstrtoul(arg, 10, &new_window_size);
	if (ret)
		return ret;

	if (new_window_size > 10 || new_window_size < 2) {
		pr_err("Out of recommended window size %lu, between 2-10\n",
		       new_window_size);
		return -EINVAL;
	}

	window_size = clamp(new_window_size, 2ul, 10ul);
	/* make the new value visible to the clamping workers */
	smp_mb();

	return 0;
}
static const struct kernel_param_ops window_size_ops = {
	.set = window_size_set,
	.get = param_get_int,
};

module_param_cb(window_size, &window_size_ops, &window_size, 0644);
MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
	"\tpowerclamp controls idle ratio within this window. larger\n"
	"\twindow size results in slower response time but more smooth\n"
	"\tclamping results. default to 2.");
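/*
 * Illustration (assumed values): with window_size=2 and 24-jiffy injection
 * periods, the controlling cpu re-checks the measured idle ratio every 2
 * periods, i.e. roughly every 48 jiffies; a larger window averages over
 * more periods and therefore reacts more slowly but more smoothly.
 */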
static void find_target_mwait(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);
}
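/*
 * Worked example (assumed CPUID data): if the deepest C-state index
 * reported is 6 with 2 sub-states, then with MWAIT_SUBSTATE_SIZE = 4
 * target_mwait = (6 << 4) | (2 - 1) = 0x61, the usual MWAIT hint layout
 * of state in the high nibble and sub-state in the low nibble.
 */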
struct pkg_cstate_info {
	bool skip;
	int msr_index;
	int cstate_id;
};

#define PKG_CSTATE_INIT(id) {				\
		.msr_index = MSR_PKG_C##id##_RESIDENCY, \
		.cstate_id = id				\
			}

static struct pkg_cstate_info pkg_cstates[] = {
	PKG_CSTATE_INIT(2),
	PKG_CSTATE_INIT(3),
	PKG_CSTATE_INIT(6),
	PKG_CSTATE_INIT(7),
	PKG_CSTATE_INIT(8),
	PKG_CSTATE_INIT(9),
	PKG_CSTATE_INIT(10),
	{},
};
static bool has_pkg_state_counter(void)
{
	u64 val;
	struct pkg_cstate_info *info = pkg_cstates;

	/* check if any one of the counter msrs exists */
	while (info->msr_index) {
		if (!rdmsrl_safe(info->msr_index, &val))
			return true;
		info++;
	}

	return false;
}
static u64 pkg_state_counter(void)
{
	u64 val;
	u64 count = 0;
	struct pkg_cstate_info *info = pkg_cstates;

	while (info->msr_index) {
		if (!info->skip) {
			if (!rdmsrl_safe(info->msr_index, &val))
				count += val;
			else
				info->skip = true;
		}
		info++;
	}

	return count;
}
static unsigned int get_compensation(int ratio)
{
	unsigned int comp = 0;

	if (!poll_pkg_cstate_enable)
		return 0;

	/* we only use compensation if all adjacent ones are good */
	if (ratio == 1 &&
	    cal_data[ratio].confidence >= CONFIDENCE_OK &&
	    cal_data[ratio + 1].confidence >= CONFIDENCE_OK &&
	    cal_data[ratio + 2].confidence >= CONFIDENCE_OK) {
		comp = (cal_data[ratio].steady_comp +
			cal_data[ratio + 1].steady_comp +
			cal_data[ratio + 2].steady_comp) / 3;
	} else if (ratio == MAX_TARGET_RATIO - 1 &&
		   cal_data[ratio].confidence >= CONFIDENCE_OK &&
		   cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
		   cal_data[ratio - 2].confidence >= CONFIDENCE_OK) {
		comp = (cal_data[ratio].steady_comp +
			cal_data[ratio - 1].steady_comp +
			cal_data[ratio - 2].steady_comp) / 3;
	} else if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
		   cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
		   cal_data[ratio + 1].confidence >= CONFIDENCE_OK) {
		comp = (cal_data[ratio].steady_comp +
			cal_data[ratio - 1].steady_comp +
			cal_data[ratio + 1].steady_comp) / 3;
	}

	/* do not exceed limit */
	if (comp + ratio >= MAX_TARGET_RATIO)
		comp = MAX_TARGET_RATIO - ratio - 1;

	return comp;
}
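/*
 * Worked example (hypothetical calibration data): for ratio 20 with all
 * three neighbors at CONFIDENCE_OK and steady_comp values of 3, 4 and 5
 * for ratios 19, 20 and 21, the averaged comp is (4 + 3 + 5) / 3 = 4, so
 * a requested 20% idle is injected as 24%.
 */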
static void adjust_compensation(int target_ratio, unsigned int win)
{
	int delta;
	struct powerclamp_calibration_data *d = &cal_data[target_ratio];

	/*
	 * adjust compensations if confidence level has not been reached.
	 */
	if (d->confidence >= CONFIDENCE_OK)
		return;

	delta = set_target_ratio - current_ratio;
	/* filter out bad data */
	if (delta >= 0 && delta <= (1 + target_ratio / 10)) {
		if (d->steady_comp)
			d->steady_comp =
				roundup(delta + d->steady_comp, 2) / 2;
		else
			d->steady_comp = delta;
		d->confidence++;
	}
}
static bool powerclamp_adjust_controls(unsigned int target_ratio,
				       unsigned int guard, unsigned int win)
{
	static u64 msr_last, tsc_last;
	u64 msr_now, tsc_now;
	u64 val64;

	/* check result for the last window */
	msr_now = pkg_state_counter();
	tsc_now = rdtsc();

	/* calculate pkg cstate vs tsc ratio */
	if (!msr_last || !tsc_last)
		current_ratio = 1;
	else if (tsc_now - tsc_last) {
		val64 = 100 * (msr_now - msr_last);
		do_div(val64, (tsc_now - tsc_last));
		current_ratio = val64;
	}

	/* update record */
	msr_last = msr_now;
	tsc_last = tsc_now;

	adjust_compensation(target_ratio, win);

	/* if we are above target+guard, skip */
	return set_target_ratio + guard <= current_ratio;
}
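/*
 * Sanity check of the ratio math above (assumed numbers): the package
 * residency MSRs tick at roughly the TSC rate, so if the residency delta
 * over a window is 600M ticks while the TSC advanced 2000M ticks,
 * current_ratio = 100 * 600M / 2000M = 30, i.e. 30% package C-state
 * residency.
 */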
static void clamp_balancing_func(struct kthread_work *work)
{
	struct powerclamp_worker_data *w_data;
	int sleeptime;
	unsigned long target_jiffies;
	unsigned int compensated_ratio;
	int interval; /* jiffies to sleep for each attempt */

	w_data = container_of(work, struct powerclamp_worker_data,
			      balancing_work);

	/*
	 * make sure the user selected ratio does not take effect until
	 * the next round. adjust target_ratio if the user has changed
	 * the target such that we can converge quickly.
	 */
	w_data->target_ratio = READ_ONCE(set_target_ratio);
	w_data->guard = 1 + w_data->target_ratio / 20;
	w_data->window_size_now = window_size;
	w_data->duration_jiffies = msecs_to_jiffies(duration);
	w_data->count++;

	/*
	 * systems may have different ability to enter package level
	 * c-states, thus we need to compensate the injected idle ratio
	 * to achieve the actual target reported by the HW.
	 */
	compensated_ratio = w_data->target_ratio +
		get_compensation(w_data->target_ratio);
	if (compensated_ratio <= 0)
		compensated_ratio = 1;
	interval = w_data->duration_jiffies * 100 / compensated_ratio;

	/* align idle time */
	target_jiffies = roundup(jiffies, interval);
	sleeptime = target_jiffies - jiffies;
	if (sleeptime <= 0)
		sleeptime = 1;

	if (clamping && w_data->clamping && cpu_online(w_data->cpu))
		kthread_queue_delayed_work(w_data->worker,
					   &w_data->idle_injection_work,
					   sleeptime);
}
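/*
 * Worked example for the interval computation above (assuming HZ=1000):
 * duration=6 ms gives 6 injection jiffies; at a compensated target of 25%,
 *	interval = 6 * 100 / 25 = 24 jiffies,
 * so 6 idle jiffies out of every 24 meets the 25% idle ratio, and
 * roundup(jiffies, 24) keeps all cpus injecting on aligned boundaries.
 */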
static void clamp_idle_injection_func(struct kthread_work *work)
{
	struct powerclamp_worker_data *w_data;

	w_data = container_of(work, struct powerclamp_worker_data,
			      idle_injection_work.work);

	/*
	 * only the elected controlling cpu can collect stats and update
	 * control parameters.
	 */
	if (w_data->cpu == control_cpu &&
	    !(w_data->count % w_data->window_size_now)) {
		should_skip =
			powerclamp_adjust_controls(w_data->target_ratio,
						   w_data->guard,
						   w_data->window_size_now);
		smp_mb();
	}

	if (should_skip)
		goto balance;

	play_idle(jiffies_to_usecs(w_data->duration_jiffies));

balance:
	if (clamping && w_data->clamping && cpu_online(w_data->cpu))
		kthread_queue_work(w_data->worker, &w_data->balancing_work);
}
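/*
 * Design note: each per-cpu worker ping-pongs between the two work items
 * above. clamp_balancing_func() computes the next window and queues the
 * delayed idle_injection_work; that work calls play_idle() and requeues
 * balancing_work, so exactly one of the pair is pending per cpu while
 * clamping is on.
 */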
/*
 * 1 HZ polling while clamping is active, useful for userspace
 * to monitor actual idle ratio.
 */
static void poll_pkg_cstate(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
static void poll_pkg_cstate(struct work_struct *dummy)
{
	static u64 msr_last, tsc_last;
	u64 msr_now, tsc_now;
	u64 val64;

	msr_now = pkg_state_counter();
	tsc_now = rdtsc();

	/* calculate pkg cstate vs tsc ratio */
	if (!msr_last || !tsc_last)
		pkg_cstate_ratio_cur = 1;
	else if (tsc_now - tsc_last) {
		val64 = 100 * (msr_now - msr_last);
		do_div(val64, (tsc_now - tsc_last));
		pkg_cstate_ratio_cur = val64;
	}

	/* update record */
	msr_last = msr_now;
	tsc_last = tsc_now;

	if (clamping)
		schedule_delayed_work(&poll_pkg_cstate_work, HZ);
}
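/*
 * Userspace view (assuming the generic thermal sysfs layout): while
 * clamping, the polled ratio is exported as the cooling device state,
 * e.g.
 *	cat /sys/class/thermal/cooling_deviceX/cur_state
 * where X is whichever device registered with type "intel_powerclamp".
 */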
static void start_power_clamp_worker(unsigned long cpu)
{
	struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
	struct kthread_worker *worker;

	worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
	if (IS_ERR(worker))
		return;

	w_data->worker = worker;
	w_data->count = 0;
	w_data->cpu = cpu;
	w_data->clamping = true;
	set_bit(cpu, cpu_clamping_mask);
	sched_set_fifo(worker->task);
	kthread_init_work(&w_data->balancing_work, clamp_balancing_func);
	kthread_init_delayed_work(&w_data->idle_injection_work,
				  clamp_idle_injection_func);
	kthread_queue_work(w_data->worker, &w_data->balancing_work);
}
static void stop_power_clamp_worker(unsigned long cpu)
{
	struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);

	if (!w_data->worker)
		return;

	w_data->clamping = false;
	/*
	 * Make sure that all works that get queued after this point see
	 * the clamping disabled. The counterpart is not needed because
	 * there is an implicit memory barrier when the queued work
	 * is processed.
	 */
	smp_wmb();
	kthread_cancel_work_sync(&w_data->balancing_work);
	kthread_cancel_delayed_work_sync(&w_data->idle_injection_work);
	/*
	 * The balancing work still might be queued here because
	 * the handling of the "clamping" variable, cancel, and queue
	 * operations are not synchronized via a lock. But it is not
	 * a big deal. The balancing work is fast and destroying the
	 * kthread will wait for it.
	 */
	clear_bit(w_data->cpu, cpu_clamping_mask);
	kthread_destroy_worker(w_data->worker);

	w_data->worker = NULL;
}
static int start_power_clamp(void)
{
	unsigned long cpu;

	set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
	/* prevent cpu hotplug */
	cpus_read_lock();

	/* prefer BSP */
	control_cpu = cpumask_first(cpu_online_mask);

	clamping = true;
	if (poll_pkg_cstate_enable)
		schedule_delayed_work(&poll_pkg_cstate_work, 0);

	/* start one kthread worker per online cpu */
	for_each_online_cpu(cpu) {
		start_power_clamp_worker(cpu);
	}
	cpus_read_unlock();

	return 0;
}
static void end_power_clamp(void)
{
	int i;

	/*
	 * Block requeuing in all the kthread workers. They will flush and
	 * stop faster.
	 */
	clamping = false;
	for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
		pr_debug("clamping worker for cpu %d alive, destroy\n", i);
		stop_power_clamp_worker(i);
	}
}
static int powerclamp_cpu_online(unsigned int cpu)
{
	if (clamping == false)
		return 0;
	start_power_clamp_worker(cpu);
	/* prefer BSP as controlling CPU */
	if (cpu == 0) {
		control_cpu = 0;
		smp_mb();
	}
	return 0;
}
static int powerclamp_cpu_predown(unsigned int cpu)
{
	if (clamping == false)
		return 0;

	stop_power_clamp_worker(cpu);
	if (cpu != control_cpu)
		return 0;

	control_cpu = cpumask_first(cpu_online_mask);
	if (control_cpu == cpu)
		control_cpu = cpumask_next(cpu, cpu_online_mask);
	smp_mb();
	return 0;
}
static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
				    unsigned long *state)
{
	*state = MAX_TARGET_RATIO;

	return 0;
}
static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
				    unsigned long *state)
{
	if (clamping) {
		if (poll_pkg_cstate_enable)
			*state = pkg_cstate_ratio_cur;
		else
			*state = set_target_ratio;
	} else {
		/* to save power, do not poll idle ratio while not clamping */
		*state = -1; /* indicates invalid state */
	}

	return 0;
}
static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
				    unsigned long new_target_ratio)
{
	int ret = 0;

	new_target_ratio = clamp(new_target_ratio, 0UL,
				 (unsigned long) (MAX_TARGET_RATIO - 1));
	if (set_target_ratio == 0 && new_target_ratio > 0) {
		pr_info("Start idle injection to reduce power\n");
		set_target_ratio = new_target_ratio;
		ret = start_power_clamp();
		goto exit_set;
	} else if (set_target_ratio > 0 && new_target_ratio == 0) {
		pr_info("Stop forced idle injection\n");
		end_power_clamp();
		set_target_ratio = 0;
	} else /* adjust currently running */ {
		set_target_ratio = new_target_ratio;
		/* make new set_target_ratio visible to other cpus */
		smp_mb();
	}

exit_set:
	return ret;
}
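/*
 * Usage sketch (the cooling device index X is system dependent): the
 * thermal core calls set_cur_state() when userspace writes the sysfs
 * node, e.g.
 *	echo 30 > /sys/class/thermal/cooling_deviceX/cur_state
 * starts injection targeting roughly 30% idle, and writing 0 stops it.
 */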
/* bind to generic thermal layer as cooling device */
static const struct thermal_cooling_device_ops powerclamp_cooling_ops = {
	.get_max_state = powerclamp_get_max_state,
	.get_cur_state = powerclamp_get_cur_state,
	.set_cur_state = powerclamp_set_cur_state,
};
static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
	X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_MWAIT, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
static int __init powerclamp_probe(void)
{
	if (!x86_match_cpu(intel_powerclamp_ids)) {
		pr_err("CPU does not support MWAIT\n");
		return -ENODEV;
	}

	/* The goal for idle time alignment is to achieve package cstate. */
	if (!has_pkg_state_counter()) {
		pr_info("No package C-state available\n");
		return -ENODEV;
	}

	/* find the deepest mwait value */
	find_target_mwait();

	return 0;
}
static int powerclamp_debug_show(struct seq_file *m, void *unused)
{
	int i;

	seq_printf(m, "controlling cpu: %d\n", control_cpu);
	seq_puts(m, "pct confidence steady dynamic (compensation)\n");
	for (i = 0; i < MAX_TARGET_RATIO; i++) {
		seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
			   i,
			   cal_data[i].confidence,
			   cal_data[i].steady_comp,
			   cal_data[i].dynamic_comp);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(powerclamp_debug);
static inline void powerclamp_create_debug_files(void)
{
	debug_dir = debugfs_create_dir("intel_powerclamp", NULL);

	debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir, cal_data,
			    &powerclamp_debug_fops);
}
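/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug): the runtime
 * calibration table can be inspected with
 *	cat /sys/kernel/debug/intel_powerclamp/powerclamp_calib
 */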
static enum cpuhp_state hp_state;
static int __init powerclamp_init(void)
{
	int retval;

	cpu_clamping_mask = bitmap_zalloc(num_possible_cpus(), GFP_KERNEL);
	if (!cpu_clamping_mask)
		return -ENOMEM;

	/* probe cpu features and ids here */
	retval = powerclamp_probe();
	if (retval)
		goto exit_free;

	/* set default limit, maybe adjusted during runtime based on feedback */
	window_size = 2;
	retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					   "thermal/intel_powerclamp:online",
					   powerclamp_cpu_online,
					   powerclamp_cpu_predown);
	if (retval < 0)
		goto exit_free;

	hp_state = retval;

	worker_data = alloc_percpu(struct powerclamp_worker_data);
	if (!worker_data) {
		retval = -ENOMEM;
		goto exit_unregister;
	}

	if (topology_max_packages() == 1 && topology_max_die_per_package() == 1)
		poll_pkg_cstate_enable = true;

	cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
						      &powerclamp_cooling_ops);
	if (IS_ERR(cooling_dev)) {
		retval = -ENODEV;
		goto exit_free_thread;
	}

	duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);

	powerclamp_create_debug_files();

	return 0;

exit_free_thread:
	free_percpu(worker_data);
exit_unregister:
	cpuhp_remove_state_nocalls(hp_state);
exit_free:
	bitmap_free(cpu_clamping_mask);
	return retval;
}
module_init(powerclamp_init);
static void __exit powerclamp_exit(void)
{
	end_power_clamp();
	cpuhp_remove_state_nocalls(hp_state);
	free_percpu(worker_data);
	thermal_cooling_device_unregister(cooling_dev);
	bitmap_free(cpu_clamping_mask);

	cancel_delayed_work_sync(&poll_pkg_cstate_work);
	debugfs_remove_recursive(debug_dir);
}
module_exit(powerclamp_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");