// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * This list contains information parsed from per CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the share_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE

/* Frequency invariance support */
struct cppc_freq_invariance {
	int cpu;
	struct irq_work irq_work;
	struct kthread_work work;
	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
	struct cppc_cpudata *cpu_data;
};

static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;

static struct cpufreq_driver cppc_cpufreq_driver;
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1);

/**
 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
 * @work: The work item.
 *
 * The CPPC driver registers itself with the topology core to provide its own
 * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
 * gets called by the scheduler on every tick.
 *
 * Note that the arch specific counters have higher priority than CPPC counters,
 * if available, though the CPPC driver doesn't need to have any special
 * handling for that.
 *
 * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
 * reach here from hard-irq context), which then schedules a normal work item
 * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
 * based on the counter updates since the last tick.
 */
static void cppc_scale_freq_workfn(struct kthread_work *work)
{
	struct cppc_freq_invariance *cppc_fi;
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	struct cppc_cpudata *cpu_data;
	unsigned long local_freq_scale;
	u64 perf;

	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
	cpu_data = cppc_fi->cpu_data;

	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
		pr_warn("%s: failed to read perf counters\n", __func__);
		return;
	}

	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
				     &fb_ctrs);
	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;

	perf <<= SCHED_CAPACITY_SHIFT;
	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);

	/* This can happen due to counter's overflow */
	if (unlikely(local_freq_scale > 1024))
		local_freq_scale = 1024;

	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
}

static void cppc_irq_work(struct irq_work *irq_work)
{
	struct cppc_freq_invariance *cppc_fi;

	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
	kthread_queue_work(kworker_fie, &cppc_fi->work);
}

static void cppc_scale_freq_tick(void)
{
	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());

	/*
	 * cppc_get_perf_ctrs() can potentially sleep, call that from the right
	 * context.
	 */
	irq_work_queue(&cppc_fi->irq_work);
}

static struct scale_freq_data cppc_sftd = {
	.source = SCALE_FREQ_SOURCE_CPPC,
	.set_freq_scale = cppc_scale_freq_tick,
};

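/*
 * Set up the per-CPU frequency-invariance state for every CPU in the policy
 * and register cppc_sftd above as the scale-factor source, so the scheduler
 * tick starts driving arch_freq_scale updates for these CPUs.
 */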
static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu, ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	for_each_cpu(cpu, policy->cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		cppc_fi->cpu = cpu;
		cppc_fi->cpu_data = policy->driver_data;
		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);

		ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
		if (ret) {
			pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
				__func__, cpu, ret);

			/*
			 * Don't abort if the CPU was offline while the driver
			 * was getting registered.
			 */
			if (cpu_online(cpu))
				return;
		}
	}

	/* Register for freq-invariance */
	topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
}

/*
 * We free all the resources on policy's removal and not on CPU removal as the
 * irq-work items are per-CPU and the hotplug core takes care of flushing the
 * pending irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the
 * kthread-work fires on another CPU after the concerned CPU is removed, it
 * won't harm.
 *
 * We just need to make sure to remove them all on policy->exit().
 */
static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	/* policy->cpus will be empty here, use related_cpus instead */
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		irq_work_sync(&cppc_fi->irq_work);
		kthread_cancel_work_sync(&cppc_fi->work);
	}
}

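/*
 * Create the "cppc_fie" kthread worker that runs cppc_scale_freq_workfn()
 * and move it to SCHED_DEADLINE; if the scheduling policy cannot be set,
 * the worker is destroyed and frequency invariance is left disabled.
 */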
static void __init cppc_freq_invariance_init(void)
{
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline = 10000000,
		.sched_period	= 10000000,
	};
	int ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kworker_fie = kthread_create_worker(0, "cppc_fie");
	if (IS_ERR(kworker_fie))
		return;

	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
	if (ret) {
		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
			ret);
		kthread_destroy_worker(kworker_fie);
		return;
	}
}

static void cppc_freq_invariance_exit(void)
{
	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kthread_destroy_worker(kworker_fie);
	kworker_fie = NULL;
}

#else
static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
}

static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
}

static inline void cppc_freq_invariance_init(void)
{
}

static inline void cppc_freq_invariance_exit(void)
{
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing by the 2 points:
 * - (Low perf, Low freq)
 * - (Nominal perf, Nominal freq)
 */
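/*
 * Both conversion helpers below thus compute y = offset + (x * mul) / div,
 * using div64_u64() since a u64/u64 division is not native on 32-bit
 * architectures.
 */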
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
					     unsigned int perf)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_freq - caps->lowest_freq;
		div = caps->nominal_perf - caps->lowest_perf;
		offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
					     unsigned int freq)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_perf - caps->lowest_perf;
		div = caps->nominal_freq - caps->lowest_freq;
		offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we need to use the PCC values
 * as a fallback if we don't have a platform specific transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			return 10000;
		}
	}
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

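/*
 * Allocate a cppc_cpudata entry for @cpu, parse its _PSD domain via
 * acpi_get_psd_map(), read its _CPC capabilities via cppc_get_perf_caps()
 * and add it to cpu_data_list. Returns NULL on any failure.
 */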
static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}

static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	list_del(&cpu_data->node);
	free_cpumask_var(cpu_data->shared_cpu_map);
	kfree(cpu_data);
	policy->driver_data = NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}
	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		ret = -EFAULT;
		goto out;
	}

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret) {
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);
		goto out;
	}

	cppc_cpufreq_cpu_fie_init(policy);
	return 0;

out:
	cppc_cpufreq_put_cpu_data(policy);
	return ret;
}

static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cppc_cpufreq_cpu_fie_exit(policy);

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	cppc_cpufreq_put_cpu_data(policy);
	return 0;
}

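/*
 * The feedback counters may be implemented with fewer than 64 bits. If t0
 * fits in 32 bits, assume a 32-bit counter and subtract in 32-bit arithmetic
 * so that a single wraparound between samples still yields the right delta.
 */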
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}

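/*
 * Estimate delivered performance over the window between two counter
 * snapshots as: reference_perf * delta_delivered / delta_reference.
 */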
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf;

	reference_perf = fb_ctrs_t0->reference_perf;

	delta_reference = get_delta(fb_ctrs_t1->reference,
				    fb_ctrs_t0->reference);
	delta_delivered = get_delta(fb_ctrs_t1->delivered,
				    fb_ctrs_t0->delivered);

	/* Check to avoid divide-by-zero and invalid delivered_perf */
	if (!delta_reference || !delta_delivered)
		return cpu_data->perf_ctrls.desired_perf;

	return (reference_perf * delta_delivered) / delta_reference;
}

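/*
 * ->get() callback: take two feedback-counter snapshots 2us apart and
 * convert the delivered performance over that window to a rate in kHz.
 */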
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 delivered_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2); /* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
	if (ret)
		return ret;

	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
					       &fb_ctrs_t1);

	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

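/*
 * ->set_boost() callback: with boost enabled the policy's max tracks
 * highest_perf, otherwise it is capped at nominal_perf.
 */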
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.init = cppc_cpufreq_cpu_init,
	.exit = cppc_cpufreq_cpu_exit,
	.set_boost = cppc_cpufreq_set_boost,
	.attr = cppc_cpufreq_attr,
	.name = "cppc_cpufreq",
};

/*
 * HISI platforms do not support the delivered performance counter and the
 * reference performance counter. They can calculate the performance using a
 * platform specific mechanism. We reuse the desired performance register to
 * store the real performance calculated by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 desired_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

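/*
 * Match the PCCT's OEM id, table id and revision against the affected
 * HiSilicon entries in wa_info[]; on a match, switch the driver's ->get()
 * callback to the platform specific implementation above.
 */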
static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
	int ret;

	if ((acpi_disabled) || !acpi_cpc_valid())
		return -ENODEV;

	INIT_LIST_HEAD(&cpu_data_list);

	cppc_check_hisi_workaround();
	cppc_freq_invariance_init();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
		cppc_freq_invariance_exit();

	return ret;
}

static inline void free_cpu_data(void)
{
	struct cppc_cpudata *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
		free_cpumask_var(iter->shared_cpu_map);
		list_del(&iter->node);
		kfree(iter);
	}
}

static void __exit cppc_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&cppc_cpufreq_driver);
	cppc_freq_invariance_exit();

	free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);