/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * if the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}
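
/*
 * Note: MPIDR_MT_BITMASK tests the MPIDR_EL1.MT bit which, when set,
 * indicates that the lowest affinity level is occupied by hardware
 * threads rather than independent cores. That makes it a reasonable
 * fallback thread indicator when the PPTT carries no thread flag.
 */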

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id   = topology_id;
		} else {
			cpu_topology[cpu].thread_id  = -1;
			cpu_topology[cpu].core_id    = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#endif
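
/*
 * Illustrative sketch (hypothetical IDs, not from a real PPTT): on an
 * SMT part, CPU0 might come out of the loop above as
 *
 *	cpu_topology[0].thread_id  = 0	(leaf PPTT node)
 *	cpu_topology[0].core_id    = 1	(parent of the thread node)
 *	cpu_topology[0].package_id = 2	(physical package node)
 *
 * whereas on a non-threaded part thread_id stays -1 and the leaf node
 * ID becomes the core_id directly.
 */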

#ifdef CONFIG_ARM64_AMU_EXTN

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;

/* Initialize counter reference per-cpu variables for the current CPU */
void init_cpu_freq_invariance_counters(void)
{
	this_cpu_write(arch_core_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
	this_cpu_write(arch_const_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
}
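
/*
 * Background: of the two architected AMU counters sampled here, the
 * core counter (SYS_AMEVCNTR0_CORE_EL0) increments at the frequency
 * the CPU is actually running at, while the constant counter
 * (SYS_AMEVCNTR0_CONST_EL0) ticks at the fixed system counter rate.
 * The ratio of their deltas over a window therefore tracks the CPU's
 * average frequency over that window, which is what the tick handler
 * below exploits.
 */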

static int validate_cpu_freq_invariance_counters(int cpu)
{
	u64 max_freq_hz, ratio;

	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return -EINVAL;
	}

	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
		     !per_cpu(arch_core_cycles_prev, cpu))) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return -EINVAL;
	}

	/* Convert maximum frequency from KHz to Hz and validate */
	max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000ULL;
	if (unlikely(!max_freq_hz)) {
		pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
		return -EINVAL;
	}

	/*
	 * Pre-compute the fixed ratio between the frequency of the constant
	 * counter and the maximum frequency of the CPU.
	 *
	 *			      const_freq
	 * arch_max_freq_scale =  ---------------- * SCHED_CAPACITY_SCALE²
	 *			  cpuinfo_max_freq
	 *
	 * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
	 * in order to ensure a good resolution for arch_max_freq_scale for
	 * very low arch timer frequencies (down to the KHz range which should
	 * be unlikely).
	 */
	ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_freq_hz);
	if (!ratio) {
		WARN_ONCE(1, "System timer frequency too low.\n");
		return -EINVAL;
	}

	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

	return 0;
}
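
/*
 * Worked example for the ratio above (illustrative numbers only):
 * with arch_timer_get_rate() = 25 MHz and cpuinfo_max_freq = 2 GHz,
 *
 *	ratio = (25000000 << 20) / 2000000000
 *	      = 26214400000000 / 2000000000
 *	      = 13107
 *
 * i.e. arch_max_freq_scale = 13107 ~= 0.0125 * SCHED_CAPACITY_SCALE²,
 * matching const_freq / cpuinfo_max_freq = 0.0125 with
 * 2 * SCHED_CAPACITY_SHIFT = 20 bits of fixed-point resolution.
 */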

static inline bool
enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy) {
		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
		return false;
	}

	if (cpumask_subset(policy->related_cpus, valid_cpus))
		cpumask_or(amu_fie_cpus, policy->related_cpus,
			   amu_fie_cpus);

	cpufreq_cpu_put(policy);

	return true;
}
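
/*
 * The subset check above enables counters per policy, all or nothing:
 * on, say, a hypothetical big.LITTLE system where only the big cluster
 * implements AMU, the big cluster's policy CPUs all land in
 * amu_fie_cpus while the little cluster contributes none, rather than
 * mixing counter-based and cpufreq-based scaling within one policy.
 */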

static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
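
/*
 * A static key keeps the no-AMU case cheap: until init_amu_fie()
 * enables the key, amu_freq_invariant() compiles down to a patched
 * branch in the tick path rather than a load and test of a flag.
 */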

static int __init init_amu_fie(void)
{
	cpumask_var_t valid_cpus;
	bool have_policy = false;
	int ret = 0;
	int cpu;

	if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_valid_mask;
	}

	for_each_present_cpu(cpu) {
		if (validate_cpu_freq_invariance_counters(cpu))
			continue;
		cpumask_set_cpu(cpu, valid_cpus);
		have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
	}

	/*
	 * If we are not restricted by cpufreq policies, we only enable
	 * the use of the AMU feature for FIE if all CPUs support AMU.
	 * Otherwise, enable_policy_freq_counters has already enabled
	 * FIE for those CPUs whose policies could be used in full.
	 */
	if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
		cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);

	if (!cpumask_empty(amu_fie_cpus)) {
		pr_info("CPUs[%*pbl]: counters will be used for FIE.",
			cpumask_pr_args(amu_fie_cpus));
		static_branch_enable(&amu_fie_key);
	}

	/*
	 * If the system is not fully invariant after AMU init, disable
	 * partial use of counters for frequency invariance.
	 */
	if (!topology_scale_freq_invariant())
		static_branch_disable(&amu_fie_key);

free_valid_mask:
	free_cpumask_var(valid_cpus);

	return ret;
}
late_initcall_sync(init_amu_fie);
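
/*
 * init_amu_fie() depends on cpufreq policies being in place, both for
 * cpufreq_get_hw_max_freq() in the validation step and for
 * cpufreq_cpu_get() above, hence late_initcall_sync: it runs after all
 * ordinary late initcalls, by which point built-in cpufreq drivers
 * have had the chance to register their policies.
 */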

bool arch_freq_counters_available(const struct cpumask *cpus)
{
	return amu_freq_invariant() &&
	       cpumask_subset(cpus, amu_fie_cpus);
}
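
/*
 * Sketch of the intended use (an assumption about the caller, not part
 * of this file): generic topology code can test this predicate before
 * applying cpufreq-based frequency scaling, along the lines of
 *
 *	if (arch_freq_counters_available(policy->related_cpus))
 *		return;
 *
 * so that cpufreq transition notifications do not overwrite the
 * counter-derived freq_scale values written by the tick handler below.
 */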

void topology_scale_freq_tick(void)
{
	u64 prev_core_cnt, prev_const_cnt;
	u64 core_cnt, const_cnt, scale;
	int cpu = smp_processor_id();

	if (!amu_freq_invariant())
		return;

	if (!cpumask_test_cpu(cpu, amu_fie_cpus))
		return;

	const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
	core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

	if (unlikely(core_cnt <= prev_core_cnt ||
		     const_cnt <= prev_const_cnt))
		goto store_and_exit;

	/*
	 *	    /\core    arch_max_freq_scale
	 * scale =  ------- * --------------------
	 *	    /\const   SCHED_CAPACITY_SCALE
	 *
	 * See validate_cpu_freq_invariance_counters() for details on
	 * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
	 */
	scale = core_cnt - prev_core_cnt;
	scale *= this_cpu_read(arch_max_freq_scale);
	scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
			  const_cnt - prev_const_cnt);

	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
	this_cpu_write(freq_scale, (unsigned long)scale);

store_and_exit:
	this_cpu_write(arch_core_cycles_prev, core_cnt);
	this_cpu_write(arch_const_cycles_prev, const_cnt);
}
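
/*
 * Worked example, continuing the 25 MHz / 2 GHz numbers from above
 * (arch_max_freq_scale = 13107): over a tick window where the core
 * counter advanced by 4000000 cycles and the constant counter by
 * 100000 ticks (the core averaged 40 * 25 MHz = 1 GHz),
 *
 *	scale = ((4000000 * 13107) >> 10) / 100000
 *	      = 51199218 / 100000
 *	      = 511
 *
 * i.e. just under SCHED_CAPACITY_SCALE / 2 = 512, as expected for a
 * CPU running at half its maximum frequency (the shortfall is
 * fixed-point truncation).
 */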
#endif /* CONFIG_ARM64_AMU_EXTN */