/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * If the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}
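/*
 * For illustration: MPIDR_MT_BITMASK tests MPIDR_EL1.MT (bit 24), which is
 * set when the lowest affinity level (Aff0) enumerates logical threads of a
 * multithreaded core rather than independent cores. So on firmware whose
 * PPTT lacks thread nodes, an SMT part still has its CPUs treated as
 * threads, while a non-SMT part (MT == 0) gets thread_id = -1 below.
 */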
/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;
		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#endif
#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt()	read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt()	read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
#else
#define read_corecnt()	(0UL)
#define read_constcnt()	(0UL)
#endif

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;
void update_freq_counters_refs(void)
{
	this_cpu_write(arch_core_cycles_prev, read_corecnt());
	this_cpu_write(arch_const_cycles_prev, read_constcnt());
}
static inline bool freq_counters_valid(int cpu)
{
	if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
		return false;

	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return false;
	}

	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
		     !per_cpu(arch_core_cycles_prev, cpu))) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return false;
	}

	return true;
}
static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
{
	u64 ratio;

	if (unlikely(!max_rate || !ref_rate)) {
		pr_debug("CPU%d: invalid maximum or reference frequency.\n",
			 cpu);
		return -EINVAL;
	}

	/*
	 * Pre-compute the fixed ratio between the frequency of the constant
	 * reference counter and the maximum frequency of the CPU.
	 *
	 *			    ref_rate
	 * arch_max_freq_scale =   ---------- * SCHED_CAPACITY_SCALE²
	 *			    max_rate
	 *
	 * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
	 * in order to ensure a good resolution for arch_max_freq_scale for
	 * very low reference frequencies (down to the kHz range which should
	 * be unlikely).
	 */
	ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_rate);
	if (!ratio) {
		WARN_ONCE(1, "Reference frequency too low.\n");
		return -EINVAL;
	}

	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

	return 0;
}
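/*
 * Worked example for freq_inv_set_max_ratio() above, assuming (for
 * illustration) a 25 MHz constant reference counter (ref_rate = 25000000)
 * and a 2 GHz CPU (max_rate = 2000000000), with SCHED_CAPACITY_SHIFT = 10:
 *
 *	ratio = (25000000 << 20) / 2000000000 = 13107
 *
 * i.e. (25 MHz / 2 GHz) * 1024² ~= 13107.2, truncated to 13107. With only a
 * single SCHED_CAPACITY_SHIFT the same inputs would yield 12 instead of
 * 12.8, i.e. several percent of error from truncation alone, which is why
 * the extra shift is used for resolution.
 */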
static void amu_scale_freq_tick(void)
{
	u64 prev_core_cnt, prev_const_cnt;
	u64 core_cnt, const_cnt, scale;

	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

	update_freq_counters_refs();

	const_cnt = this_cpu_read(arch_const_cycles_prev);
	core_cnt = this_cpu_read(arch_core_cycles_prev);

	if (unlikely(core_cnt <= prev_core_cnt ||
		     const_cnt <= prev_const_cnt))
		return;

	/*
	 *	    /\core    arch_max_freq_scale
	 * scale =  ------- * --------------------
	 *	    /\const   SCHED_CAPACITY_SCALE
	 *
	 * See freq_inv_set_max_ratio() for details on arch_max_freq_scale
	 * and the use of SCHED_CAPACITY_SHIFT.
	 */
	scale = core_cnt - prev_core_cnt;
	scale *= this_cpu_read(arch_max_freq_scale);
	scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
			  const_cnt - prev_const_cnt);

	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
	this_cpu_write(arch_freq_scale, (unsigned long)scale);
}
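/*
 * Continuing the illustrative numbers above (arch_max_freq_scale = 13107
 * for a 2 GHz CPU with a 25 MHz reference): if, between two ticks, the core
 * counter advanced by 1000000 while the constant counter advanced by 25000,
 * the CPU was running at 1 GHz, half of its maximum:
 *
 *	scale = ((1000000 * 13107) >> 10) / 25000 = 511
 *
 * which is ~SCHED_CAPACITY_SCALE / 2 as expected; the small deficit from
 * 512 is truncation in the fixed-point arithmetic.
 */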
static struct scale_freq_data amu_sfd = {
	.source = SCALE_FREQ_SOURCE_ARCH,
	.set_freq_scale = amu_scale_freq_tick,
};
static void amu_fie_setup(const struct cpumask *cpus)
{
	int cpu;

	/* We are already set since the last insmod of cpufreq driver */
	if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
		return;

	/*
	 * cpufreq_get_hw_max_freq() returns kHz; scale to Hz to match
	 * arch_timer_get_rate().
	 */
	for_each_cpu(cpu, cpus) {
		if (!freq_counters_valid(cpu) ||
		    freq_inv_set_max_ratio(cpu,
					   cpufreq_get_hw_max_freq(cpu) * 1000ULL,
					   arch_timer_get_rate()))
			return;
	}

	cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);

	topology_set_scale_freq_source(&amu_sfd, amu_fie_cpus);

	pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
		 cpumask_pr_args(cpus));
}
static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_CREATE_POLICY)
		amu_fie_setup(policy->related_cpus);

	/*
	 * We don't need to handle CPUFREQ_REMOVE_POLICY event as the AMU
	 * counters don't have any dependency on cpufreq driver once we have
	 * initialized AMU support and enabled invariance. The AMU counters will
	 * keep on working just fine in the absence of the cpufreq driver, and
	 * for the CPUs for which there are no counters available, the last set
	 * value of arch_freq_scale will remain valid as that is the frequency
	 * those CPUs are running at.
	 */

	return 0;
}

static struct notifier_block init_amu_fie_notifier = {
	.notifier_call = init_amu_fie_callback,
};
static int __init init_amu_fie(void)
{
	int ret;

	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
		return -ENOMEM;

	ret = cpufreq_register_notifier(&init_amu_fie_notifier,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		free_cpumask_var(amu_fie_cpus);

	return ret;
}
core_initcall(init_amu_fie);
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
static void cpu_read_corecnt(void *val)
{
	/*
	 * A value of 0 can be returned if the current CPU does not support AMUs
	 * or if the counter is disabled for this CPU. A return value of 0 at
	 * counter read is properly handled as an error case by the users of the
	 * counter.
	 */
	*(u64 *)val = read_corecnt();
}
static void cpu_read_constcnt(void *val)
{
	/*
	 * Return 0 if the current CPU is affected by erratum 2457168. A value
	 * of 0 is also returned if the current CPU does not support AMUs or if
	 * the counter is disabled. A return value of 0 at counter read is
	 * properly handled as an error case by the users of the counter.
	 */
	*(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
		      0UL : read_constcnt();
}
static inline
int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
{
	/*
	 * Abort call on counterless CPU or when interrupts are
	 * disabled - can lead to deadlock in smp sync call.
	 */
	if (!cpu_has_amu_feat(cpu))
		return -EOPNOTSUPP;

	if (WARN_ON_ONCE(irqs_disabled()))
		return -EPERM;

	smp_call_function_single(cpu, func, val, 1);

	return 0;
}
/*
 * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
 * below.
 */
bool cpc_ffh_supported(void)
{
	int cpu = get_cpu_with_amu_feat();

	/*
	 * FFH is considered supported if there is at least one present CPU that
	 * supports AMUs. Using FFH to read core and reference counters for CPUs
	 * that do not support AMUs, have counters disabled or that are affected
	 * by errata, will result in a return value of 0.
	 *
	 * This is done to allow any enabled and valid counters to be read
	 * through FFH, knowing that potentially returning 0 as counter value is
	 * properly handled by the users of these counters.
	 */
	if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
		return false;

	return true;
}
int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
{
	int ret = -EOPNOTSUPP;

	switch ((u64)reg->address) {
	case 0x0:
		ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
		break;
	case 0x1:
		ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
		break;
	default:
		break;
	}

	if (!ret) {
		*val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				    reg->bit_offset);
		*val >>= reg->bit_offset;
	}

	return ret;
}
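/*
 * For illustration: a hypothetical CPC register described with
 * bit_offset = 8 and bit_width = 16 would be masked with GENMASK_ULL(23, 8),
 * keeping bits 8..23 of the raw counter value, and then shifted down by 8
 * so the caller sees the 16-bit field right-aligned in *val.
 */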
int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -EOPNOTSUPP;
}

#endif /* CONFIG_ACPI_CPPC_LIB */