/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers, which may be memory mapped or hardware registers, and may also
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds.)
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
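/*
 * Illustrative sketch (not part of the original file): a CPUFreq driver is
 * expected to drive the interfaces below roughly as follows. Error handling
 * is elided and 'cpu' is an assumed CPU number.
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	cppc_get_perf_caps(cpu, &caps);			// discover the bounds
 *	ctrls.desired_perf = caps.nominal_perf;		// pick a level within them
 *	cppc_set_perf(cpu, &ctrls);			// request it from the platform
 */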
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf), we
	 * need to take the read lock and check that the channel belongs to
	 * OSPM before reading from or writing to the PCC subspace, and we
	 * need to take the write lock before transferring channel ownership
	 * to the platform via a doorbell. This allows us to batch a number
	 * of CPPC requests if they happen to originate at about the same
	 * time.
	 *
	 * For non-performance-critical use cases (init), take the write
	 * lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};
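/*
 * A minimal sketch (assumed, simplified) of the locking protocol described
 * above; the real sequence lives in cppc_set_perf() further down:
 *
 *	down_read(&pcc_ss_data->pcc_lock);		// Phase-I: many CPUs
 *	if (pcc_ss_data->platform_owns_pcc)
 *		check_pcc_chan(pcc_ss_id, false);	// wait for OSPM ownership
 *	cpc_write(cpu, desired_reg, perf);		// batched register write
 *	up_read(&pcc_ss_data->pcc_lock);
 *
 *	if (down_write_trylock(&pcc_ss_data->pcc_lock)) { // Phase-II: one CPU
 *		send_pcc_cmd(pcc_ss_id, CMD_WRITE);	// ring doorbell once
 *		up_write(&pcc_ss_data->pcc_lock);
 *	}
 */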
/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains the per-CPU PCC subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))
/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to true if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to true if an optional CPC field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands. Keeping it high enough to cover emulators, where
 * the processors run painfully slowly.
 */
#define NUM_RETRIES 500ULL
#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
					struct kobj_attribute *attr,	\
					char *buf)			\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
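/*
 * For clarity (a rough expansion, for illustration only): the first line
 * above generates something like
 *
 *	static ssize_t show_highest_perf(struct kobject *kobj,
 *					 struct kobj_attribute *attr, char *buf)
 *	{
 *		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
 *		struct cppc_perf_caps st_name = {0};
 *		int ret = cppc_get_perf_caps(cpc_ptr->cpu_id, &st_name);
 *		...
 *	}
 *	static struct kobj_attribute highest_perf =
 *		__ATTR(highest_perf, 0444, show_highest_perf, NULL);
 */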
static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);
static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};
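/*
 * The attributes above surface per-CPU CPPC data in sysfs. On a typical
 * system (path assumed from the kobject parentage set up later in
 * acpi_cppc_processor_probe()) that looks like, for example:
 *
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs
 */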
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll the PCC status register every 3us (delay_us) for a maximum of
	 * deadline_us (timeout_us) until the PCC command-complete bit is set
	 * (cond).
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					status & PCC_CMD_COMPLETE_MASK, 3,
					pcc_ss_data->deadline_us);
	if (!ret) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (ret)
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}
/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;
	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}
	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}
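	/*
	 * Worked example (values assumed): with pcc_mpar == 600 commands per
	 * minute, mpar_count is refilled to 600 at most once per 60s window,
	 * i.e. an average budget of 10 commands/second; once mpar_count hits
	 * zero inside a window, further commands are not sent to the platform
	 * until the window expires.
	 */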
	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* Wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (ret) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
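/*
 * For reference, a _PSD object passing the checks above is a single
 * 5-element package (example values assumed, following the ACPI spec):
 *
 *	Name (_PSD, Package ()
 *	{
 *		Package ()
 *		{
 *			5,	// NumEntries
 *			0,	// Revision
 *			0,	// Domain
 *			0xFD,	// CoordType: SW_ALL=0xFC, SW_ANY=0xFD, HW_ALL=0xFE
 *			4	// NumProcessors
 *		}
 *	})
 */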
/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU-specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;
		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}
/**
 * cpc_ffh_supported() - check if FFH reading is supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}
/**
 * pcc_data_alloc() - Allocate the pcc_data memory for a PCC subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}
/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *		{
 *		17,				// NumEntries
 *		3,				// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *		// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *		// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *		// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *		// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *		// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *		// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		...
 *		}
 *	)
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */
/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;
	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev < CPPC_V2_REV) {
		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
			 pr->id);
		goto out_free;
	}

	/*
	 * Disregard _CPC if the number of entries in the return package is not
	 * as expected, but support future revisions being proper supersets of
	 * v3 and only causing more entries to be returned by _CPC.
	 */
	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
			 num_ent, pr->id);
		goto out_free;
	}
	if (cpc_rev > CPPC_V3_REV) {
		num_ent = CPPC_V3_NUM_ENT;
		cpc_rev = CPPC_V3_REV;
	}

	cpc_ptr->num_entries = num_ent;
	cpc_ptr->version = cpc_rev;
	/* Iterate through the remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * the LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register the PCC channel once per PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	kfree(output.pointer);
	return 0;
out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	placeholder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code otherwise
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write a value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code otherwise
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}
/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they should
 * be as fast as possible. We have already mapped the PCC subspace during init,
 * so we can directly write to it.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}
/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated, else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated, else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value.
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during the
	 * lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I, where we want to write to the CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel.
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously, we
	 * achieve that goal here.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
		 * will not arrive and steal the channel during the switch to
		 * the write lock.
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II, where we transfer the ownership of PCC to
	 * the platform.
	 *
	 * Short summary: if we think of a group of cppc_set_perf requests
	 * that happened in a short overlapping interval, the last CPU to
	 * come out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I.
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II.
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE:
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (the other CPU acquiring the write_lock) couldn't have
	 * happened before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing a pcc CMD_READ has stolen the
	 * down_write, in which case send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to the platform by triggering the
	 * doorbell and transferred the ownership of PCC to the platform. So
	 * this check avoids triggering an unnecessary doorbell and, more
	 * importantly, before triggering the doorbell it makes sure that the
	 * PCC channel ownership is still with OSPM.
	 *
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the
	 * lock before the pcc CMD_WRITE is completed. send_pcc_cmd checks
	 * for this case during a CMD_READ, and if there are pending writes
	 * it delivers the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
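/*
 * Illustrative timeline (assumed, three CPUs writing at about the same
 * time): A, B and C each take the read lock and write their Desired
 * Performance register (Phase-I). C is the last to drop the read lock,
 * wins down_write_trylock() and rings the doorbell once for all three
 * requests (Phase-II); A and B sleep on pcc_write_wait_q until
 * pcc_write_cnt moves past their write_cmd_id.
 */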
/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can communicate the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables, which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the
	 *               subspace channel can support, reported in commands
	 *               per minute. 0 indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after
	 *               the completion of a command before issuing the next
	 *               command, in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
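/*
 * Worked example for cppc_get_transition_latency() (values assumed):
 * with pcc_mpar = 600 commands/min, latency_ns = 60 * (10^9 / 600),
 * i.e. about 100 ms. With pcc_nominal = 500 us (500,000 ns) and
 * pcc_mrtt = 60 us (60,000 ns), the max() chain keeps ~100 ms as the
 * reported transition latency, since the MPAR-derived bound dominates.
 */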