// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif
#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);
/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI core
 * is asked to change the speed policy, the maximum value is clamped so
 * that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */
/*
 * ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly " \
		 "limited by BIOS, this should help");

static bool acpi_processor_ppc_in_use;
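/*
 * Note: this file is normally built into the "processor" module, so the
 * parameter above can typically be set on the kernel command line as
 * "processor.ignore_ppc=1" (module-name assumption; adjust if this file is
 * built differently in your configuration).
 */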
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	s32 qos_value;
	int index;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_in_use = true;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
		return -ENODEV;
	}

	index = ppc;

	if (pr->performance_platform_limit == index ||
	    ppc >= pr->performance->state_count)
		return 0;

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 index, index ? "is" : "is not");

	pr->performance_platform_limit = index;

	if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	/*
	 * If _PPC returns 0, it means that all of the available states can be
	 * used ("no limit").
	 */
	if (index == 0)
		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	else
		qos_value = pr->performance->states[index].core_frequency * 1000;

	ret = freq_qos_update_request(&pr->perflib_req, qos_value);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: Notify firmware the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * The _OST object is evaluated only for a notification
		 * event; otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated only for a notification event;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;

	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;

	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);
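/*
 * Illustrative usage (not part of this file): a cpufreq driver that relies on
 * this library, such as acpi-cpufreq, is expected to wire this helper into its
 * ->bios_limit callback (".bios_limit = acpi_processor_get_bios_limit") so the
 * cpufreq core can report the BIOS/platform frequency limit via sysfs.
 */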
void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}
void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

		/*
		 * Reset performance_platform_limit in case there is a stale
		 * value in it, so as to make it match the "no limit" QoS value
		 * below.
		 */
		pr->performance_platform_limit = 0;

		ret = freq_qos_add_request(&policy->constraints,
					   &pr->perflib_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}
void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->perflib_req);
	}
}
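/*
 * _PCT returns a package of two Buffer elements, each wrapping a Generic
 * Register descriptor: element 0 describes the performance control register
 * and element 1 the performance status register. Both are copied verbatim
 * into struct acpi_pct_register below.
 */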
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}
#ifdef CONFIG_X86
/*
 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
 * in their ACPI data. Calculate the real values and fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
	    || boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
#endif
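/*
 * Each _PSS entry is a package of six integers (hence the "NNNNNN" extraction
 * format below): CoreFrequency [MHz], Power [mW], TransitionLatency [us],
 * BusMasterLatency [us], Control and Status. They map one-to-one onto
 * struct acpi_processor_px.
 */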
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc_array(pss->package.count,
			  sizeof(struct acpi_processor_px),
			  GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			printk(KERN_ERR FW_BUG PREFIX
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		printk(KERN_ERR FW_BUG PREFIX
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}
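/*
 * Top-level helper: gather everything cpufreq needs from the namespace, i.e.
 * the _PCT control/status registers, the _PSS P-state table and the initial
 * _PPC platform limit.
 */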
int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
	 * the BIOS is older than the CPU and does not know its frequencies
	 */
 update_bios:
#ifdef CONFIG_X86
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
			       "frequency support\n");
	}
#endif
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
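/*
 * The FADT provides an SMI command port (smi_command) and a value
 * (pstate_control) that OSPM writes to that port to claim ownership of
 * P-state control from the platform firmware.
 */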
int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	ACPI_EXCEPTION((AE_INFO, status,
			"Failed to write pstate_control [0x%x] to smi_command [0x%x]",
			acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
	return -EIO;
}
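/*
 * Notify the SMM firmware, once, that the OS is taking over P-state control.
 * Illustrative usage (not defined here): a cpufreq driver calls
 * acpi_processor_notify_smm(THIS_MODULE) from its per-CPU init path, as
 * acpi-cpufreq is expected to do.
 */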
int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done = 0;
	int result;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/* is_done is set to negative if an error occurred,
	 * and to positive if _no_ error occurred, but SMM
	 * was already notified. This avoids double notification
	 * which might lead to unexpected results...
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	result = acpi_processor_pstate_control();
	if (!result) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}
	if (result < 0) {
		module_put(calling_module);
		return result;
	}

	/* Success. If there's no _PPC, we need to fear nothing, so
	 * we can allow the cpufreq driver to be rmmod'ed. */
	is_done = 1;

	if (!acpi_processor_ppc_in_use)
		module_put(calling_module);

	return 0;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);
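/*
 * _PSD returns a package holding a single sub-package of five integers
 * (extraction format "NNNNN" below): NumEntries, Revision, Domain, CoordType
 * and NumProcessors, which describe how P-state changes must be coordinated
 * across the CPUs of one domain.
 */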
int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);
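/*
 * Evaluate _PSD for every possible CPU and translate the ACPI coordination
 * types into cpufreq sharing types (SW_ALL -> CPUFREQ_SHARED_TYPE_ALL,
 * SW_ANY -> CPUFREQ_SHARED_TYPE_ANY, HW_ALL -> CPUFREQ_SHARED_TYPE_HW),
 * building shared_cpu_map for each domain. On any error while parsing the
 * domain info, every CPU falls back to "no coordination" (a domain of its
 * own).
 */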
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
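/*
 * Typical call sequence for a driver using this library (illustrative sketch,
 * not part of this file): allocate a per-CPU acpi_processor_performance with
 * alloc_percpu(), pass it once to acpi_processor_preregister_performance() to
 * establish the _PSD domains, then call acpi_processor_register_performance()
 * for each CPU from the cpufreq ->init() path and
 * acpi_processor_unregister_performance() from ->exit().
 */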
int
acpi_processor_register_performance(struct acpi_processor_performance
				    *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);
void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);

	pr->performance = NULL;

	mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);