/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
        int is_threaded = acpi_pptt_cpu_is_thread(cpu);

        /*
         * If the PPTT doesn't have thread information, assume a homogeneous
         * machine and return the current CPU's thread state.
         */
        if (is_threaded < 0)
                is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

        return !!is_threaded;
}
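
/*
 * Note that MPIDR_EL1.MT (bit 24) only indicates whether the lowest
 * affinity level consists of multithreaded logical PEs; it does not
 * identify which CPUs are siblings of the same core, which is why the
 * PPTT thread information is preferred whenever it is available.
 */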

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
        int cpu, topology_id;

        if (acpi_disabled)
                return 0;

        for_each_possible_cpu(cpu) {
                topology_id = find_acpi_cpu_topology(cpu, 0);
                if (topology_id < 0)
                        return topology_id;

                if (acpi_cpu_is_threaded(cpu)) {
                        cpu_topology[cpu].thread_id = topology_id;
                        topology_id = find_acpi_cpu_topology(cpu, 1);
                        cpu_topology[cpu].core_id   = topology_id;
                } else {
                        cpu_topology[cpu].thread_id  = -1;
                        cpu_topology[cpu].core_id    = topology_id;
                }
                topology_id = find_acpi_cpu_topology_cluster(cpu);
                cpu_topology[cpu].cluster_id = topology_id;
                topology_id = find_acpi_cpu_topology_package(cpu);
                cpu_topology[cpu].package_id = topology_id;
        }

        return 0;
}
#endif
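
/*
 * Illustrative example for a hypothetical machine: on a non-SMT system
 * with one package containing two clusters of two cores each, the loop
 * above could end up with ID assignments along the lines of:
 *
 *   CPU  thread_id  core_id  cluster_id  package_id
 *    0      -1         0         0           0
 *    1      -1         1         0           0
 *    2      -1         2         1           0
 *    3      -1         3         1           0
 *
 * The exact ID values are firmware-dependent (they are derived from the
 * PPTT); only the grouping they express is meaningful.
 */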

#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt()  read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
#else
#define read_corecnt()  (0UL)
#define read_constcnt() (0UL)
#endif
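
/*
 * Without CONFIG_ARM64_AMU_EXTN the stubs above always read as zero.
 * freq_counters_valid() below treats a zero snapshot as "counters not
 * enabled", so the frequency invariance machinery in this file stays
 * inactive in that configuration.
 */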

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;

void update_freq_counters_refs(void)
{
        this_cpu_write(arch_core_cycles_prev, read_corecnt());
        this_cpu_write(arch_const_cycles_prev, read_constcnt());
}

static inline bool freq_counters_valid(int cpu)
{
        if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
                return false;

        if (!cpu_has_amu_feat(cpu)) {
                pr_debug("CPU%d: counters are not supported.\n", cpu);
                return false;
        }

        if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
                     !per_cpu(arch_core_cycles_prev, cpu))) {
                pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
                return false;
        }

        return true;
}

static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
{
        u64 ratio;

        if (unlikely(!max_rate || !ref_rate)) {
                pr_debug("CPU%d: invalid maximum or reference frequency.\n",
                         cpu);
                return -EINVAL;
        }

        /*
         * Pre-compute the fixed ratio between the frequency of the constant
         * reference counter and the maximum frequency of the CPU.
         *
         *                          ref_rate
         * arch_max_freq_scale =   ---------- * SCHED_CAPACITY_SCALE²
         *                          max_rate
         *
         * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
         * in order to ensure a good resolution for arch_max_freq_scale for
         * very low reference frequencies (down to the kHz range which should
         * be unlikely).
         */
        ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
        ratio = div64_u64(ratio, max_rate);
        if (!ratio) {
                WARN_ONCE(1, "Reference frequency too low.\n");
                return -EINVAL;
        }

        per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

        return 0;
}
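
/*
 * Worked example with hypothetical rates: with a 25 MHz constant
 * reference counter (a common arch timer rate) and a 2 GHz maximum CPU
 * frequency:
 *
 *   ratio = (25000000 << 20) / 2000000000 = 13107
 *
 * which is roughly SCHED_CAPACITY_SCALE² / 80, matching the 1/80 ratio
 * of 25 MHz to 2 GHz.
 */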

static void amu_scale_freq_tick(void)
{
        u64 prev_core_cnt, prev_const_cnt;
        u64 core_cnt, const_cnt, scale;

        prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
        prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

        update_freq_counters_refs();

        const_cnt = this_cpu_read(arch_const_cycles_prev);
        core_cnt = this_cpu_read(arch_core_cycles_prev);

        if (unlikely(core_cnt <= prev_core_cnt ||
                     const_cnt <= prev_const_cnt))
                return;

        /*
         *          /\core    arch_max_freq_scale
         * scale =  ------- * --------------------
         *          /\const   SCHED_CAPACITY_SCALE
         *
         * See freq_inv_set_max_ratio() for details on arch_max_freq_scale
         * and the use of SCHED_CAPACITY_SHIFT.
         */
        scale = core_cnt - prev_core_cnt;
        scale *= this_cpu_read(arch_max_freq_scale);
        scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
                          const_cnt - prev_const_cnt);

        scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
        this_cpu_write(arch_freq_scale, (unsigned long)scale);
}
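
/*
 * Continuing the hypothetical numbers from freq_inv_set_max_ratio():
 * with a 25 MHz reference, a 2 GHz maximum and ticks 4 ms apart, a CPU
 * that ran at 1 GHz in between sees roughly:
 *
 *   const_cnt - prev_const_cnt = 100000
 *   core_cnt  - prev_core_cnt  = 4000000
 *   scale = ((4000000 * 13107) >> 10) / 100000 = 511
 *
 * i.e. about SCHED_CAPACITY_SCALE / 2, as expected for a CPU running at
 * half its maximum frequency.
 */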

static struct scale_freq_data amu_sfd = {
        .source = SCALE_FREQ_SOURCE_ARCH,
        .set_freq_scale = amu_scale_freq_tick,
};

static void amu_fie_setup(const struct cpumask *cpus)
{
        int cpu;

        /* We are already set up since the last insmod of the cpufreq driver */
        if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
                return;

        for_each_cpu(cpu, cpus) {
                if (!freq_counters_valid(cpu) ||
                    freq_inv_set_max_ratio(cpu,
                                           cpufreq_get_hw_max_freq(cpu) * 1000ULL,
                                           arch_timer_get_rate()))
                        return;
        }

        cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);

        topology_set_scale_freq_source(&amu_sfd, amu_fie_cpus);

        pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
                 cpumask_pr_args(cpus));
}
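
/*
 * Enrollment is all or nothing per policy: if any CPU in @cpus fails
 * validation, the early return above leaves the entire policy on the
 * default cpufreq-based frequency invariance, rather than mixing
 * counter-based and cpufreq-based scale factors within one policy.
 */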

static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
                                 void *data)
{
        struct cpufreq_policy *policy = data;

        if (val == CPUFREQ_CREATE_POLICY)
                amu_fie_setup(policy->related_cpus);

        /*
         * We don't need to handle the CPUFREQ_REMOVE_POLICY event as the AMU
         * counters don't have any dependency on the cpufreq driver once we
         * have initialized AMU support and enabled invariance. The AMU
         * counters will keep on working just fine in the absence of the
         * cpufreq driver, and for the CPUs for which there are no counters
         * available, the last set value of arch_freq_scale will remain valid
         * as that is the frequency those CPUs are running at.
         */

        return 0;
}

static struct notifier_block init_amu_fie_notifier = {
        .notifier_call = init_amu_fie_callback,
};

static int __init init_amu_fie(void)
{
        int ret;

        if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
                return -ENOMEM;

        ret = cpufreq_register_notifier(&init_amu_fie_notifier,
                                        CPUFREQ_POLICY_NOTIFIER);
        if (ret)
                free_cpumask_var(amu_fie_cpus);

        return ret;
}
core_initcall(init_amu_fie);
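
/*
 * core_initcall() runs before cpufreq drivers typically probe (as
 * device-level initcalls or modules), so the notifier is expected to be
 * registered before any CPUFREQ_CREATE_POLICY event is published.
 */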

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

static void cpu_read_corecnt(void *val)
{
        /*
         * A value of 0 can be returned if the current CPU does not support
         * AMUs or if the counter is disabled for this CPU. A return value
         * of 0 at counter read is properly handled as an error case by the
         * users of the counter.
         */
        *(u64 *)val = read_corecnt();
}

static void cpu_read_constcnt(void *val)
{
        /*
         * Return 0 if the current CPU is affected by erratum 2457168. A value
         * of 0 is also returned if the current CPU does not support AMUs or if
         * the counter is disabled. A return value of 0 at counter read is
         * properly handled as an error case by the users of the counter.
         */
        *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
                      0UL : read_constcnt();
}

static inline
int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
{
        /*
         * Abort the call on a counterless CPU or when interrupts are
         * disabled, as that can lead to a deadlock in the SMP sync call.
         */
        if (!cpu_has_amu_feat(cpu))
                return -EOPNOTSUPP;

        if (WARN_ON_ONCE(irqs_disabled()))
                return -EPERM;

        smp_call_function_single(cpu, func, val, 1);

        return 0;
}
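
/*
 * The cross-call is needed because the AMU counters are per-CPU system
 * registers that can only be read from the CPU they belong to. Passing
 * wait=1 to smp_call_function_single() blocks until the target CPU has
 * written the value back, which is also why this must not be invoked
 * with interrupts disabled.
 */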

/*
 * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
 * below.
 */
bool cpc_ffh_supported(void)
{
        int cpu = get_cpu_with_amu_feat();

        /*
         * FFH is considered supported if there is at least one present CPU that
         * supports AMUs. Using FFH to read core and reference counters for CPUs
         * that do not support AMUs, have counters disabled or that are affected
         * by errata, will result in a return value of 0.
         *
         * This is done to allow any enabled and valid counters to be read
         * through FFH, knowing that potentially returning 0 as counter value is
         * properly handled by the users of these counters.
         */
        if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
                return false;

        return true;
}

int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
{
        int ret = -EOPNOTSUPP;

        switch ((u64)reg->address) {
        case 0x0:
                ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
                break;
        case 0x1:
                ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
                break;
        }

        if (!ret) {
                *val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
                                    reg->bit_offset);
                *val >>= reg->bit_offset;
        }

        return ret;
}
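
/*
 * As the switch above shows, FFH register address 0x0 selects the core
 * cycle counter (delivered performance) and 0x1 the constant cycle
 * counter (reference performance). The mask-and-shift step then
 * extracts the bit field described by the _CPC register descriptor;
 * e.g. a descriptor with bit_offset = 0 and bit_width = 64 yields
 * GENMASK_ULL(63, 0) and returns the raw counter value unchanged.
 */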

int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */