// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * provides a flexible, low-latency interface for the Linux kernel to directly
 * communicate performance hints to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system. There are two types of hardware implementations
 * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution.
 * The X86_FEATURE_CPPC CPU feature flag is used to distinguish between them.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"
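
/*
 * cpufreq expects transition_latency in nanoseconds and transition_delay_us
 * in microseconds; amd_pstate_cpu_init() below reports these defaults.
 */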
#define AMD_PSTATE_TRANSITION_LATENCY	0x20000
#define AMD_PSTATE_TRANSITION_DELAY	500

/*
 * TODO: We need more time to fine tune processors with the shared memory
 * solution together with the community.
 *
 * There are some performance drops on CPU benchmarks reported by SUSE. We are
 * working with them to fine tune the shared memory solution, so it is disabled
 * by default (falling back to acpi-cpufreq on these processors), and a module
 * parameter is provided to enable it manually for debugging.
 */
static bool shared_mem = false;
module_param(shared_mem, bool, 0444);
MODULE_PARM_DESC(shared_mem,
	"enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");

static struct cpufreq_driver amd_pstate_driver;

/**
 * struct amd_aperf_mperf
 * @aperf: actual performance frequency clock count
 * @mperf: maximum performance frequency clock count
 * @tsc: time stamp counter
 */
struct amd_aperf_mperf {
	u64 aperf;
	u64 mperf;
	u64 tsc;
};

/**
 * struct amd_cpudata - private CPU data for AMD P-State
 * @cpu: CPU number
 * @req: constraint request to apply
 * @cppc_req_cached: cached performance request hints
 * @highest_perf: the maximum performance an individual processor may reach,
 *		  assuming ideal conditions
 * @nominal_perf: the maximum sustained performance level of the processor,
 *		  assuming ideal operating conditions
 * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
 *			   savings are achieved
 * @lowest_perf: the absolute lowest performance level of the processor
 * @max_freq: the frequency that maps to highest_perf
 * @min_freq: the frequency that maps to lowest_perf
 * @nominal_freq: the frequency that maps to nominal_perf
 * @lowest_nonlinear_freq: the frequency that maps to lowest_nonlinear_perf
 * @cur: difference of Aperf/Mperf/tsc count between last and current sample
 * @prev: last Aperf/Mperf/tsc count value read from registers
 * @freq: current cpu frequency value
 * @boost_supported: whether the processor or SBIOS supports boost mode
 *
 * The amd_cpudata is the key private data for each CPU thread in AMD P-State;
 * it represents all the attributes and goals that AMD P-State requests at runtime.
 */
struct amd_cpudata {
	int	cpu;

	struct	freq_qos_request req[2];
	u64	cppc_req_cached;

	u32	highest_perf;
	u32	nominal_perf;
	u32	lowest_nonlinear_perf;
	u32	lowest_perf;

	u32	max_freq;
	u32	min_freq;
	u32	nominal_freq;
	u32	lowest_nonlinear_freq;

	struct amd_aperf_mperf cur;
	struct amd_aperf_mperf prev;

	u64	freq;
	bool	boost_supported;
};

static inline int pstate_enable(bool enable)
{
	return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
}

static int cppc_enable(bool enable)
{
	int cpu, ret = 0;

	for_each_present_cpu(cpu) {
		ret = cppc_set_enable(cpu, enable);
		if (ret)
			return ret;
	}

	return ret;
}
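
/*
 * The amd_pstate_* wrappers below dispatch through static calls. They default
 * to the full MSR implementations and are re-pointed to the shared memory
 * (cppc_*) implementations in amd_pstate_init() when shared_mem is enabled.
 */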
DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);

static inline int amd_pstate_enable(bool enable)
{
	return static_call(amd_pstate_enable)(enable);
}

static int pstate_init_perf(struct amd_cpudata *cpudata)
{
	u64 cap1;

	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
				     &cap1);
	if (ret)
		return ret;

	/*
	 * TODO: Introduce AMD specific power feature.
	 *
	 * CPPC entry doesn't indicate the highest performance in some ASICs.
	 */
	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());

	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));

	return 0;
}

static int cppc_init_perf(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());

	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
		   cppc_perf.lowest_nonlinear_perf);
	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);

	return 0;
}

DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);

static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
	return static_call(amd_pstate_init_perf)(cpudata);
}
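
/*
 * Write the cached CPPC request to the hardware: with fast_switch the MSR is
 * written on the local CPU from the scheduler's fast path, otherwise the
 * write is routed to the target CPU with wrmsrl_on_cpu().
 */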
static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
			       u32 des_perf, u32 max_perf, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
	else
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
			      READ_ONCE(cpudata->cppc_req_cached));
}

static void cppc_update_perf(struct amd_cpudata *cpudata,
			     u32 min_perf, u32 des_perf,
			     u32 max_perf, bool fast_switch)
{
	struct cppc_perf_ctrls perf_ctrls;

	perf_ctrls.max_perf = max_perf;
	perf_ctrls.min_perf = min_perf;
	perf_ctrls.desired_perf = des_perf;

	cppc_set_perf(cpudata->cpu, &perf_ctrls);
}

DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);

static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
					  u32 min_perf, u32 des_perf,
					  u32 max_perf, bool fast_switch)
{
	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
					    max_perf, fast_switch);
}
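
/*
 * Sample the APERF/MPERF/TSC deltas used by the trace event; returns false
 * when the counters have not advanced since the previous sample.
 */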
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
	u64 aperf, mperf, tsc;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();

	if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}

	local_irq_restore(flags);

	cpudata->cur.aperf = aperf;
	cpudata->cur.mperf = mperf;
	cpudata->cur.tsc = tsc;
	cpudata->cur.aperf -= cpudata->prev.aperf;
	cpudata->cur.mperf -= cpudata->prev.mperf;
	cpudata->cur.tsc -= cpudata->prev.tsc;

	cpudata->prev.aperf = aperf;
	cpudata->prev.mperf = mperf;
	cpudata->prev.tsc = tsc;
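
	/* Effective frequency in kHz: base clock scaled by the APERF/MPERF ratio. */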
	cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);

	return true;
}
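
/*
 * Pack the min/desired/max performance levels into the cached CPPC request
 * value and forward it to the hardware (MSR or shared memory) backend.
 */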
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
			      u32 des_perf, u32 max_perf, bool fast_switch)
{
	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
	u64 value = prev;

	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(des_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
			cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
			cpudata->cpu, (value != prev), fast_switch);
	}

	if (value == prev)
		return;

	WRITE_ONCE(cpudata->cppc_req_cached, value);

	amd_pstate_update_perf(cpudata, min_perf, des_perf,
			       max_perf, fast_switch);
}

static int amd_pstate_verify(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	return 0;
}

static int amd_pstate_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	struct cpufreq_freqs freqs;
	struct amd_cpudata *cpudata = policy->driver_data;
	unsigned long max_perf, min_perf, des_perf, cap_perf;

	if (!cpudata->max_freq)
		return -ENODEV;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
	max_perf = cap_perf;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
				     cpudata->max_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	amd_pstate_update(cpudata, min_perf, des_perf,
			  max_perf, false);
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}
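
/*
 * Callback for cpufreq's adjust_perf fast path (used by the schedutil
 * governor): scale the scheduler's hints, expressed relative to 'capacity',
 * into CPPC performance levels, keeping min_perf at or above
 * lowest_nonlinear_perf for better power efficiency.
 */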
static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long _min_perf,
				   unsigned long target_perf,
				   unsigned long capacity)
{
	unsigned long max_perf, min_perf, des_perf,
		      cap_perf, lowest_nonlinear_perf;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata = policy->driver_data;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);

	des_perf = cap_perf;
	if (target_perf < capacity)
		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

	min_perf = READ_ONCE(cpudata->highest_perf);
	if (_min_perf < capacity)
		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

	if (min_perf < lowest_nonlinear_perf)
		min_perf = lowest_nonlinear_perf;

	max_perf = cap_perf;
	if (max_perf < min_perf)
		max_perf = min_perf;

	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);

	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
}
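
/*
 * The amd_get_*_freq() helpers below derive frequencies in kHz from the
 * ACPI CPPC performance capabilities (_CPC) reported for this CPU.
 */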
static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.lowest_freq * 1000;
}
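
/*
 * boost_ratio and lowest_nonlinear_ratio below are fixed-point ratios of two
 * perf levels scaled by 2^SCHED_CAPACITY_SHIFT, e.g.
 * max_freq = nominal_freq * highest_perf / nominal_perf.
 */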
static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 max_perf, max_freq, nominal_freq, nominal_perf;
	u64 boost_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);
	max_perf = READ_ONCE(cpudata->highest_perf);

	boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
			      nominal_perf);

	max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return max_freq * 1000;
}

static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.nominal_freq * 1000;
}

static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
	    nominal_freq, nominal_perf;
	u64 lowest_nonlinear_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;

	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
					 nominal_perf);

	lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return lowest_nonlinear_freq * 1000;
}

static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	if (!cpudata->boost_supported) {
		pr_err("Boost mode is not supported by this processor or SBIOS\n");
		return -EINVAL;
	}

	if (state)
		policy->cpuinfo.max_freq = cpudata->max_freq;
	else
		policy->cpuinfo.max_freq = cpudata->nominal_freq;

	policy->max = policy->cpuinfo.max_freq;

	ret = freq_qos_update_request(&cpudata->req[1],
				      policy->cpuinfo.max_freq);
	if (ret < 0)
		return ret;

	return 0;
}
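
/* Boost is reported as supported only when highest_perf exceeds nominal_perf. */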
static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
{
	u32 highest_perf, nominal_perf;

	highest_perf = READ_ONCE(cpudata->highest_perf);
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	if (highest_perf <= nominal_perf)
		return;

	cpudata->boost_supported = true;
	amd_pstate_driver.boost_enabled = true;
}

static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);

	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata1;
	}

	policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
	policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;

	policy->min = min_freq;
	policy->max = max_freq;

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;

	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;
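
	/*
	 * Fast frequency switching is only enabled with the full MSR
	 * solution; the shared memory path is not suited to the scheduler's
	 * fast switch context.
	 */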
	if (boot_cpu_has(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
				   FREQ_QOS_MIN, policy->cpuinfo.min_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_cpudata1;
	}

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
				   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto free_cpudata2;
	}

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	amd_pstate_boost_init(cpudata);

	return 0;

free_cpudata2:
	freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
	kfree(cpudata);
	return ret;
}

static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	freq_qos_remove_request(&cpudata->req[1]);
	freq_qos_remove_request(&cpudata->req[0]);
	kfree(cpudata);

	return 0;
}

static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(true);
	if (ret)
		pr_err("failed to enable amd-pstate during resume, return %d\n", ret);

	return ret;
}

static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(false);
	if (ret)
		pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);

	return ret;
}

/* Sysfs attributes */
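
/*
 * The read-only attributes below appear in each policy's cpufreq sysfs
 * directory, e.g. /sys/devices/system/cpu/cpu0/cpufreq/amd_pstate_max_freq.
 */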

/*
 * This frequency indicates the maximum hardware frequency.
 * If boost is not active but supported, this frequency will be larger than
 * the one in cpuinfo.
 */
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
					char *buf)
{
	int max_freq;
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	max_freq = amd_get_max_freq(cpudata);
	if (max_freq < 0)
		return max_freq;

	return sprintf(&buf[0], "%u\n", max_freq);
}

static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
						     char *buf)
{
	int freq;
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	freq = amd_get_lowest_nonlinear_freq(cpudata);
	if (freq < 0)
		return freq;

	return sprintf(&buf[0], "%u\n", freq);
}

/*
 * In some ASICs, the highest_perf is not the one in the _CPC table, so we
 * need to expose it to sysfs.
 */
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
					    char *buf)
{
	u32 perf;
	struct amd_cpudata *cpudata = policy->driver_data;

	perf = READ_ONCE(cpudata->highest_perf);

	return sprintf(&buf[0], "%u\n", perf);
}

cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);

cpufreq_freq_attr_ro(amd_pstate_highest_perf);

static struct freq_attr *amd_pstate_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	NULL,
};

static struct cpufreq_driver amd_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
	.verify		= amd_pstate_verify,
	.target		= amd_pstate_target,
	.init		= amd_pstate_cpu_init,
	.exit		= amd_pstate_cpu_exit,
	.suspend	= amd_pstate_cpu_suspend,
	.resume		= amd_pstate_cpu_resume,
	.set_boost	= amd_pstate_set_boost,
	.name		= "amd-pstate",
	.attr		= amd_pstate_attr,
};
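
/*
 * Module init: register only on AMD systems with a valid _CPC object, pick
 * the MSR or shared memory backend, enable the CPPC feature, then register
 * with the cpufreq core.
 */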
static int __init amd_pstate_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!acpi_cpc_valid()) {
		pr_debug("the _CPC object is not present in SBIOS\n");
		return -ENODEV;
	}

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	/* capability check */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
		amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
	} else if (shared_mem) {
		static_call_update(amd_pstate_enable, cppc_enable);
		static_call_update(amd_pstate_init_perf, cppc_init_perf);
		static_call_update(amd_pstate_update_perf, cppc_update_perf);
	} else {
		pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
		return -ENODEV;
	}

	/* enable the amd-pstate feature */
	ret = amd_pstate_enable(true);
	if (ret) {
		pr_err("failed to enable amd-pstate with return %d\n", ret);
		return ret;
	}

	ret = cpufreq_register_driver(&amd_pstate_driver);
	if (ret)
		pr_err("failed to register amd_pstate_driver with return %d\n",
		       ret);

	return ret;
}

static void __exit amd_pstate_exit(void)
{
	cpufreq_unregister_driver(&amd_pstate_driver);

	amd_pstate_enable(false);
}

module_init(amd_pstate_init);
module_exit(amd_pstate_exit);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
MODULE_LICENSE("GPL");