// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
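/*
 * A minimal userspace sketch of the common case (all online CPUs, no
 * flags). The constants come from the riscv64 UAPI headers; see
 * Documentation/arch/riscv/hwprobe.rst for the full contract:
 *
 *	#include <asm/hwprobe.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct riscv_hwprobe pairs[] = {
 *		{ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
 *		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *	};
 *
 *	// cpusetsize == 0 and cpus == NULL select all online CPUs
 *	long ret = syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);
 */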
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <vdso/vsyscall.h>

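/*
 * Resolve one of the ID-register keys (mvendorid/marchid/mimpid) across a
 * set of CPUs: if every CPU in the set reports the same cached value, that
 * value is the answer; any disagreement between harts degrades the answer
 * to -1.
 */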
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

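/*
 * Compute the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmap: a bit is reported only
 * if every CPU in @cpus has that extension. Illustration (hypothetical
 * harts): if hart 0 supports Zba and Zbb but hart 1 only Zbb, Zba lands
 * in @missing and the reported bitmap carries only RISCV_HWPROBE_EXT_ZBB.
 */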
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector())
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBS);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZBC);

		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZTSO);
		EXT_KEY(ZACAS);
		EXT_KEY(ZICOND);

		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
		}

		if (has_fpu()) {
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
			EXT_KEY(ZFA);
		}
#undef EXT_KEY
	}

	/* Now stop reporting any features that some CPU is missing. */
	pair->value &= ~missing;
}

static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

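/*
 * Resolve RISCV_HWPROBE_KEY_CPUPERF_0: every CPU's recorded misaligned
 * access speed (a RISCV_HWPROBE_MISALIGNED_* value) must agree across
 * @cpus; mixed or unpopulated answers degrade to
 * RISCV_HWPROBE_MISALIGNED_UNKNOWN.
 */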
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	return perf;
}

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
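
/*
 * Illustrative userspace sketch of the forward-compatibility rule above
 * (SOME_FUTURE_KEY is hypothetical, standing in for a key this kernel
 * does not recognize):
 *
 *	struct riscv_hwprobe pair = { .key = SOME_FUTURE_KEY };
 *
 *	syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
 *	if (pair.key == -1)
 *		// unrecognized key; pair.value was set to 0
 */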

static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU, without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
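
/*
 * Illustrative userspace sketch of probing an explicit CPU set with the
 * glibc cpu_set_t helpers (the pairs array is as in the example at the
 * top of this file):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	cpu_set_t cpus;
 *
 *	CPU_ZERO(&cpus);
 *	CPU_SET(0, &cpus);	// ask about CPU 0 only
 *	syscall(__NR_riscv_hwprobe, pairs, 2, sizeof(cpus),
 *		(unsigned long *)&cpus, 0);
 */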
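/*
 * RISCV_HWPROBE_WHICH_CPUS operation: rather than returning values, take
 * fully-formed key/value pairs from userspace and shrink the CPU set to
 * the CPUs for which every pair holds, writing the surviving set back.
 * Any unrecognized key empties the returned set entirely.
 */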
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}
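
/*
 * Illustrative userspace sketch: find which online CPUs claim Zba. An
 * empty input set is shorthand for "start from all online CPUs":
 *
 *	struct riscv_hwprobe pair = {
 *		.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
 *		.value = RISCV_HWPROBE_EXT_ZBA,
 *	};
 *	cpu_set_t cpus;
 *
 *	CPU_ZERO(&cpus);
 *	syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(cpus),
 *		(unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
 *	// afterwards, CPU_ISSET(cpu, &cpus) reports whether that
 *	// CPU advertises Zba
 */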

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any of them is -1.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);
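
/*
 * The vDSO side of hwprobe (arch/riscv/kernel/vdso/hwprobe.c) consults
 * homogeneous_cpus and all_cpu_hwprobe_values[] to answer probes without
 * entering the kernel, and falls back to the real syscall otherwise.
 */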

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}