/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as against a discretized
 * P-state scale which is tied to CPU frequency only. In brief, the basic
 * operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize request within requested bounds
 *   depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called (PCC) Platform Communication Channel. This is a generic mailbox like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <acpi/cppc_acpi.h>
/*
 * Lock to provide mutually exclusive access to the PCC
 * channel. e.g. When the remote updates the shared region
 * with new data, the reader needs to be protected from
 * other CPUs activity on the same channel.
 */
static DEFINE_SPINLOCK(pcc_lock);
53 * The cpc_desc structure contains the ACPI register details
54 * as described in the per CPU _CPC tables. The details
55 * include the type of register (e.g. PCC, System IO, FFH etc.)
56 * and destination addresses which lets us READ/WRITE CPU performance
57 * information using the appropriate I/O methods.
59 static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
61 /* This layer handles all the PCC specifics for CPPC. */
62 static struct mbox_chan *pcc_channel;
63 static void __iomem *pcc_comm_addr;
64 static u64 comm_base_addr;
65 static int pcc_subspace_idx = -1;
66 static u16 pcc_cmd_delay;
67 static bool pcc_channel_acquired;
70 * Arbitrary Retries in case the remote processor is slow to respond
73 #define NUM_RETRIES 500
75 static int send_pcc_cmd(u16 cmd)
77 int retries, result = -EIO;
78 struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
79 struct acpi_pcct_shared_memory *generic_comm_base =
80 (struct acpi_pcct_shared_memory *) pcc_comm_addr;
81 u32 cmd_latency = pcct_ss->latency;
83 /* Min time OS should wait before sending next command. */
84 udelay(pcc_cmd_delay);
86 /* Write to the shared comm region. */
87 writew(cmd, &generic_comm_base->command);
89 /* Flip CMD COMPLETE bit */
90 writew(0, &generic_comm_base->status);
93 result = mbox_send_message(pcc_channel, &cmd);
95 pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
100 /* Wait for a nominal time to let platform process command. */
103 /* Retry in case the remote processor was too slow to catch up. */
104 for (retries = NUM_RETRIES; retries > 0; retries--) {
105 if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
111 mbox_client_txdone(pcc_channel, result);
115 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
118 pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
121 pr_debug("TX completed. CMD sent:%x, ret:%d\n",
125 struct mbox_client cppc_mbox_cl = {
126 .tx_done = cppc_chan_tx_done,
127 .knows_txdone = true,
130 static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
132 int result = -EFAULT;
133 acpi_status status = AE_OK;
134 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
135 struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
136 struct acpi_buffer state = {0, NULL};
137 union acpi_object *psd = NULL;
138 struct acpi_psd_package *pdomain;
140 status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
141 &buffer, ACPI_TYPE_PACKAGE);
142 if (status == AE_NOT_FOUND) /* _PSD is optional */
144 if (ACPI_FAILURE(status))
147 psd = buffer.pointer;
148 if (!psd || psd->package.count != 1) {
149 pr_debug("Invalid _PSD data\n");
153 pdomain = &(cpc_ptr->domain_info);
155 state.length = sizeof(struct acpi_psd_package);
156 state.pointer = pdomain;
158 status = acpi_extract_package(&(psd->package.elements[0]),
160 if (ACPI_FAILURE(status)) {
161 pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
165 if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
166 pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
170 if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
171 pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
175 if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
176 pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
177 pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
178 pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
184 kfree(buffer.pointer);
189 * acpi_get_psd_map - Map the CPUs in a common freq domain.
190 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
192 * Return: 0 for success or negative value for err.
194 int acpi_get_psd_map(struct cpudata **all_cpu_data)
199 cpumask_var_t covered_cpus;
200 struct cpudata *pr, *match_pr;
201 struct acpi_psd_package *pdomain;
202 struct acpi_psd_package *match_pdomain;
203 struct cpc_desc *cpc_ptr, *match_cpc_ptr;
205 if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
209 * Now that we have _PSD data from all CPUs, lets setup P-state
212 for_each_possible_cpu(i) {
213 pr = all_cpu_data[i];
217 if (cpumask_test_cpu(i, covered_cpus))
220 cpc_ptr = per_cpu(cpc_desc_ptr, i);
226 pdomain = &(cpc_ptr->domain_info);
227 cpumask_set_cpu(i, pr->shared_cpu_map);
228 cpumask_set_cpu(i, covered_cpus);
229 if (pdomain->num_processors <= 1)
232 /* Validate the Domain info */
233 count_target = pdomain->num_processors;
234 if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
235 pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
236 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
237 pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
238 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
239 pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
241 for_each_possible_cpu(j) {
245 match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
246 if (!match_cpc_ptr) {
251 match_pdomain = &(match_cpc_ptr->domain_info);
252 if (match_pdomain->domain != pdomain->domain)
255 /* Here i and j are in the same domain */
256 if (match_pdomain->num_processors != count_target) {
261 if (pdomain->coord_type != match_pdomain->coord_type) {
266 cpumask_set_cpu(j, covered_cpus);
267 cpumask_set_cpu(j, pr->shared_cpu_map);
270 for_each_possible_cpu(j) {
274 match_pr = all_cpu_data[j];
278 match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
279 if (!match_cpc_ptr) {
284 match_pdomain = &(match_cpc_ptr->domain_info);
285 if (match_pdomain->domain != pdomain->domain)
288 match_pr->shared_type = pr->shared_type;
289 cpumask_copy(match_pr->shared_cpu_map,
295 for_each_possible_cpu(i) {
296 pr = all_cpu_data[i];
300 /* Assume no coordination on any error parsing domain info */
302 cpumask_clear(pr->shared_cpu_map);
303 cpumask_set_cpu(i, pr->shared_cpu_map);
304 pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
308 free_cpumask_var(covered_cpus);
311 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
313 static int register_pcc_channel(int pcc_subspace_idx)
315 struct acpi_pcct_hw_reduced *cppc_ss;
318 if (pcc_subspace_idx >= 0) {
319 pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
322 if (IS_ERR(pcc_channel)) {
323 pr_err("Failed to find PCC communication channel\n");
328 * The PCC mailbox controller driver should
329 * have parsed the PCCT (global table of all
330 * PCC channels) and stored pointers to the
331 * subspace communication region in con_priv.
333 cppc_ss = pcc_channel->con_priv;
336 pr_err("No PCC subspace found for CPPC\n");
341 * This is the shared communication region
342 * for the OS and Platform to communicate over.
344 comm_base_addr = cppc_ss->base_address;
345 len = cppc_ss->length;
346 pcc_cmd_delay = cppc_ss->min_turnaround_time;
348 pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
349 if (!pcc_comm_addr) {
350 pr_err("Failed to ioremap PCC comm region mem\n");
354 /* Set flag so that we dont come here for each CPU. */
355 pcc_channel_acquired = true;
/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *			{
 *			17,
 *			NumEntries
 *			1,
 *			// Revision
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *			// Highest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *			// Nominal Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *			// Lowest Nonlinear Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *			// Lowest Performance
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *			// Guaranteed Performance Register
 *			ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *			// Desired Performance Register
 *			ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *			..
 *			..
 *			..
 *	}
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,
 *		AddressSpaceKeyword
 *		8,
 *		//RegisterBitWidth
 *		8,
 *		//RegisterBitOffset
 *		0x30,
 *		//RegisterAddress
 *		9
 *		//AccessSize (subspace ID)
 *		0
 *		)
 *	}
 */
408 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
409 * @pr: Ptr to acpi_processor containing this CPUs logical Id.
411 * Return: 0 for success or negative value for err.
413 int acpi_cppc_processor_probe(struct acpi_processor *pr)
415 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
416 union acpi_object *out_obj, *cpc_obj;
417 struct cpc_desc *cpc_ptr;
418 struct cpc_reg *gas_t;
419 acpi_handle handle = pr->handle;
420 unsigned int num_ent, i, cpc_rev;
424 /* Parse the ACPI _CPC table for this cpu. */
425 status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
427 if (ACPI_FAILURE(status)) {
432 out_obj = (union acpi_object *) output.pointer;
434 cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
440 /* First entry is NumEntries. */
441 cpc_obj = &out_obj->package.elements[0];
442 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
443 num_ent = cpc_obj->integer.value;
445 pr_debug("Unexpected entry type(%d) for NumEntries\n",
450 /* Only support CPPCv2. Bail otherwise. */
451 if (num_ent != CPPC_NUM_ENT) {
452 pr_debug("Firmware exports %d entries. Expected: %d\n",
453 num_ent, CPPC_NUM_ENT);
457 /* Second entry should be revision. */
458 cpc_obj = &out_obj->package.elements[1];
459 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
460 cpc_rev = cpc_obj->integer.value;
462 pr_debug("Unexpected entry type(%d) for Revision\n",
467 if (cpc_rev != CPPC_REV) {
468 pr_debug("Firmware exports revision:%d. Expected:%d\n",
473 /* Iterate through remaining entries in _CPC */
474 for (i = 2; i < num_ent; i++) {
475 cpc_obj = &out_obj->package.elements[i];
477 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
478 cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
479 cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
480 } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
481 gas_t = (struct cpc_reg *)
482 cpc_obj->buffer.pointer;
485 * The PCC Subspace index is encoded inside
486 * the CPC table entries. The same PCC index
487 * will be used for all the PCC entries,
488 * so extract it only once.
490 if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
491 if (pcc_subspace_idx < 0)
492 pcc_subspace_idx = gas_t->access_width;
493 else if (pcc_subspace_idx != gas_t->access_width) {
494 pr_debug("Mismatched PCC ids.\n");
497 } else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
498 /* Support only PCC and SYS MEM type regs */
499 pr_debug("Unsupported register type: %d\n", gas_t->space_id);
503 cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
504 memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
506 pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id);
510 /* Store CPU Logical ID */
511 cpc_ptr->cpu_id = pr->id;
513 /* Parse PSD data for this CPU */
514 ret = acpi_get_psd(cpc_ptr, handle);
518 /* Register PCC channel once for all CPUs. */
519 if (!pcc_channel_acquired) {
520 ret = register_pcc_channel(pcc_subspace_idx);
525 /* Plug PSD data into this CPUs CPC descriptor. */
526 per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
528 /* Everything looks okay */
529 pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
531 kfree(output.pointer);
538 kfree(output.pointer);
541 EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
544 * acpi_cppc_processor_exit - Cleanup CPC structs.
545 * @pr: Ptr to acpi_processor containing this CPUs logical Id.
549 void acpi_cppc_processor_exit(struct acpi_processor *pr)
551 struct cpc_desc *cpc_ptr;
552 cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
555 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
557 static u64 get_phys_addr(struct cpc_reg *reg)
559 /* PCC communication addr space begins at byte offset 0x8. */
560 if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
561 return (u64)comm_base_addr + 0x8 + reg->address;
566 static void cpc_read(struct cpc_reg *reg, u64 *val)
568 u64 addr = get_phys_addr(reg);
570 acpi_os_read_memory((acpi_physical_address)addr,
571 val, reg->bit_width);
574 static void cpc_write(struct cpc_reg *reg, u64 val)
576 u64 addr = get_phys_addr(reg);
578 acpi_os_write_memory((acpi_physical_address)addr,
579 val, reg->bit_width);
583 * cppc_get_perf_caps - Get a CPUs performance capabilities.
584 * @cpunum: CPU from which to get capabilities info.
585 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
587 * Return: 0 for success with perf_caps populated else -ERRNO.
589 int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
591 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
592 struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
594 u64 high, low, ref, nom;
598 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
602 highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
603 lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
604 ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
605 nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];
607 spin_lock(&pcc_lock);
609 /* Are any of the regs PCC ?*/
610 if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
611 (lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
612 (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
613 (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
614 /* Ring doorbell once to update PCC subspace */
615 if (send_pcc_cmd(CMD_READ)) {
621 cpc_read(&highest_reg->cpc_entry.reg, &high);
622 perf_caps->highest_perf = high;
624 cpc_read(&lowest_reg->cpc_entry.reg, &low);
625 perf_caps->lowest_perf = low;
627 cpc_read(&ref_perf->cpc_entry.reg, &ref);
628 perf_caps->reference_perf = ref;
630 cpc_read(&nom_perf->cpc_entry.reg, &nom);
631 perf_caps->nominal_perf = nom;
634 perf_caps->reference_perf = perf_caps->nominal_perf;
636 if (!high || !low || !nom)
640 spin_unlock(&pcc_lock);
643 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
646 * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
647 * @cpunum: CPU from which to read counters.
648 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
650 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
652 int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
654 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
655 struct cpc_register_resource *delivered_reg, *reference_reg;
656 u64 delivered, reference;
660 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
664 delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
665 reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
667 spin_lock(&pcc_lock);
669 /* Are any of the regs PCC ?*/
670 if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
671 (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
672 /* Ring doorbell once to update PCC subspace */
673 if (send_pcc_cmd(CMD_READ)) {
679 cpc_read(&delivered_reg->cpc_entry.reg, &delivered);
680 cpc_read(&reference_reg->cpc_entry.reg, &reference);
682 if (!delivered || !reference) {
687 perf_fb_ctrs->delivered = delivered;
688 perf_fb_ctrs->reference = reference;
690 perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
691 perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;
693 perf_fb_ctrs->prev_delivered = delivered;
694 perf_fb_ctrs->prev_reference = reference;
697 spin_unlock(&pcc_lock);
700 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
703 * cppc_set_perf - Set a CPUs performance controls.
704 * @cpu: CPU for which to set performance controls.
705 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
707 * Return: 0 for success, -ERRNO otherwise.
709 int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
711 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
712 struct cpc_register_resource *desired_reg;
716 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
720 desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
722 spin_lock(&pcc_lock);
725 * Skip writing MIN/MAX until Linux knows how to come up with
728 cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf);
730 /* Is this a PCC reg ?*/
731 if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
732 /* Ring doorbell so Remote can get our perf request. */
733 if (send_pcc_cmd(CMD_WRITE))
737 spin_unlock(&pcc_lock);
741 EXPORT_SYMBOL_GPL(cppc_set_perf);