// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copied from arch/arm64/kernel/cpufeature.c
 *
 * Copyright (C) 2015 ARM Ltd.
 * Copyright (C) 2017 SiFive
 */
9 #include <linux/acpi.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/ctype.h>
13 #include <linux/log2.h>
14 #include <linux/memory.h>
15 #include <linux/module.h>
18 #include <asm/alternative.h>
19 #include <asm/cacheflush.h>
20 #include <asm/cpufeature.h>
21 #include <asm/hwcap.h>
22 #include <asm/hwprobe.h>
23 #include <asm/patch.h>
24 #include <asm/processor.h>
26 #include <asm/vector.h>
28 #include "copy-unaligned.h"
30 #define NUM_ALPHA_EXTS ('z' - 'a' + 1)
32 #define MISALIGNED_ACCESS_JIFFIES_LG2 1
33 #define MISALIGNED_BUFFER_SIZE 0x4000
34 #define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
35 #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
37 unsigned long elf_hwcap __read_mostly;
40 static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
42 /* Per-cpu ISA extensions. */
43 struct riscv_isainfo hart_isa[NR_CPUS];
45 /* Performance information */
46 DEFINE_PER_CPU(long, misaligned_access_speed);
49 * riscv_isa_extension_base() - Get base extension word
51 * @isa_bitmap: ISA bitmap to use
52 * Return: base extension word as unsigned long value
54 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
56 unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
62 EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
65 * __riscv_isa_extension_available() - Check whether given extension
68 * @isa_bitmap: ISA bitmap to use
69 * @bit: bit position of the desired extension
70 * Return: true or false
72 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
74 bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
76 const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
78 if (bit >= RISCV_ISA_EXT_MAX)
81 return test_bit(bit, bmap) ? true : false;
83 EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
85 static bool riscv_isa_extension_check(int id)
88 case RISCV_ISA_EXT_ZICBOM:
89 if (!riscv_cbom_block_size) {
90 pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n");
92 } else if (!is_power_of_2(riscv_cbom_block_size)) {
93 pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
97 case RISCV_ISA_EXT_ZICBOZ:
98 if (!riscv_cboz_block_size) {
99 pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
101 } else if (!is_power_of_2(riscv_cboz_block_size)) {
102 pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
/*
 * Initializer for one riscv_isa_ext_data entry: the stringified name is
 * used both for /proc/cpuinfo output and as the "riscv,isa-extensions"
 * devicetree property string.
 */
#define __RISCV_ISA_EXT_DATA(_name, _id) {	\
	.name = #_name,				\
	.property = #_name,			\
	.id = _id,				\
}
118 * The canonical order of ISA extension names in the ISA string is defined in
119 * chapter 27 of the unprivileged specification.
121 * Ordinarily, for in-kernel data structures, this order is unimportant but
122 * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo.
124 * The specification uses vague wording, such as should, when it comes to
125 * ordering, so for our purposes the following rules apply:
127 * 1. All multi-letter extensions must be separated from other extensions by an
130 * 2. Additional standard extensions (starting with 'Z') must be sorted after
131 * single-letter extensions and before any higher-privileged extensions.
133 * 3. The first letter following the 'Z' conventionally indicates the most
134 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
135 * If multiple 'Z' extensions are named, they must be ordered first by
136 * category, then alphabetically within a category.
138 * 4. Standard supervisor-level extensions (starting with 'S') must be listed
139 * after standard unprivileged extensions. If multiple supervisor-level
140 * extensions are listed, they must be ordered alphabetically.
142 * 5. Standard machine-level extensions (starting with 'Zxm') must be listed
143 * after any lower-privileged, standard extensions. If multiple
144 * machine-level extensions are listed, they must be ordered
147 * 6. Non-standard extensions (starting with 'X') must be listed after all
148 * standard extensions. If multiple non-standard extensions are listed, they
149 * must be ordered alphabetically.
151 * An example string following the order is:
152 * rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
154 * New entries to this struct should follow the ordering rules described above.
156 const struct riscv_isa_ext_data riscv_isa_ext[] = {
157 __RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
158 __RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
159 __RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
160 __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
161 __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
162 __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
163 __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
164 __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
165 __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
166 __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
167 __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
168 __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
169 __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
170 __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
171 __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
172 __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
173 __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
174 __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
175 __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
176 __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
177 __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
178 __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
179 __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
180 __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
181 __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
182 __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
183 __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
184 __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
185 __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
186 __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
187 __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
188 __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
191 const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
/*
 * riscv_parse_isa_string() - parse one hart's "riscv,isa" string.
 *
 * Sets a bit in isainfo->isa for every recognised extension and, for
 * single-letter extensions, ORs the corresponding HWCAP bit (looked up via
 * isa2hwcap[letter - 'a']) into *this_hwcap.
 *
 * NOTE(review): this block appears truncated by extraction — several braces
 * and statements are missing. Comments below describe only the visible logic.
 */
193 static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
194 unsigned long *isa2hwcap, const char *isa)
197 * For all possible cpus, we have already validated in
198 * the boot process that they at least contain "rv" and
199 * whichever of "32"/"64" this kernel supports, and so this
200 * section can be skipped.
/* Per-iteration cursors: ext is the extension's first character, ext_end one past its name. */
205 const char *ext = isa++;
206 const char *ext_end = isa;
207 bool ext_long = false, ext_err = false;
212 * Workaround for invalid single-letter 's' & 'u' (QEMU).
213 * No need to set the bit in riscv_isa as 's' & 'u' are
214 * not valid ISA extensions. It works unless the first
215 * multi-letter extension in the ISA string begins with
216 * "Su" and is not prefixed with an underscore.
218 if (ext[-1] != '_' && ext[1] == 'u') {
230 * Before attempting to parse the extension itself, we find its end.
231 * As multi-letter extensions must be split from other multi-letter
232 * extensions with an "_", the end of a multi-letter extension will
233 * either be the null character or the "_" at the start of the next
234 * multi-letter extension.
236 * Next, as the extensions version is currently ignored, we
237 * eliminate that portion. This is done by parsing backwards from
238 * the end of the extension, removing any numbers. This may be a
239 * major or minor number however, so the process is repeated if a
240 * minor number was found.
242 * ext_end is intended to represent the first character *after* the
243 * name portion of an extension, but will be decremented to the last
244 * character itself while eliminating the extensions version number.
245 * A simple re-increment solves this problem.
/* Multi-letter path: scan forward to '_' or NUL; reject non-alphanumerics. */
248 for (; *isa && *isa != '_'; ++isa)
249 if (unlikely(!isalnum(*isa)))
253 if (unlikely(ext_err))
/* Strip the trailing version: digits, then an optional 'p' + more digits. */
256 if (!isdigit(ext_end[-1]))
259 while (isdigit(*--ext_end))
262 if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
267 while (isdigit(*--ext_end))
274 * Things are a little easier for single-letter extensions, as they
275 * are parsed forwards.
277 * After checking that our starting position is valid, we need to
278 * ensure that, when isa was incremented at the start of the loop,
279 * that it arrived at the start of the next extension.
281 * If we are already on a non-digit, there is nothing to do. Either
282 * we have a multi-letter extension's _, or the start of an
285 * Otherwise we have found the current extension's major version
286 * number. Parse past it, and a subsequent p/minor version number
287 * if present. The `p` extension must not appear immediately after
288 * a number, so there is no fear of missing it.
291 if (unlikely(!isalpha(*ext))) {
299 while (isdigit(*++isa))
302 if (tolower(*isa) != 'p')
305 if (!isdigit(*++isa)) {
310 while (isdigit(*++isa))
317 * The parser expects that at the start of an iteration isa points to the
318 * first character of the next extension. As we stop parsing an extension
319 * on meeting a non-alphanumeric character, an extra increment is needed
320 * where the succeeding extension is a multi-letter prefixed with an "_".
/* Case-insensitive, exact-length match of a multi-letter name against the table. */
325 #define SET_ISA_EXT_MAP(name, bit) \
327 if ((ext_end - ext == strlen(name)) && \
328 !strncasecmp(ext, name, strlen(name)) && \
329 riscv_isa_extension_check(bit)) \
330 set_bit(bit, isainfo->isa); \
333 if (unlikely(ext_err))
/* Single-letter extension: bit index is simply letter - 'a'. */
336 int nr = tolower(*ext) - 'a';
338 if (riscv_isa_extension_check(nr)) {
339 *this_hwcap |= isa2hwcap[nr];
340 set_bit(nr, isainfo->isa);
/* Multi-letter extension: try every known table entry. */
343 for (int i = 0; i < riscv_isa_ext_count; i++)
344 SET_ISA_EXT_MAP(riscv_isa_ext[i].name,
345 riscv_isa_ext[i].id);
347 #undef SET_ISA_EXT_MAP
/*
 * Populate hart_isa[], riscv_isa and elf_hwcap from the legacy "riscv,isa"
 * string, read either from devicetree or (when ACPI is enabled) from the
 * RHCT ACPI table.
 *
 * NOTE(review): block appears truncated by extraction; comments describe
 * only the visible logic.
 */
351 static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
353 struct device_node *node;
356 struct acpi_table_header *rhct;
/* On ACPI systems the ISA strings live in the RHCT table; fetch it once. */
360 if (!acpi_disabled) {
361 status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
362 if (ACPI_FAILURE(status))
366 for_each_possible_cpu(cpu) {
367 struct riscv_isainfo *isainfo = &hart_isa[cpu];
368 unsigned long this_hwcap = 0;
/* DT path: read the hart's "riscv,isa" property. */
371 node = of_cpu_device_node_get(cpu);
373 pr_warn("Unable to find cpu node\n");
377 rc = of_property_read_string(node, "riscv,isa", &isa);
380 pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
/* ACPI path: look the hart's ISA string up in the RHCT. */
384 rc = acpi_get_riscv_isa(rhct, cpu, &isa);
386 pr_warn("Unable to get ISA for the hart - %d\n", cpu);
391 riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);
394 * These ones were as they were part of the base ISA when the
395 * port & dt-bindings were upstreamed, and so can be set
396 * unconditionally where `i` is in riscv,isa on DT systems.
399 set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
400 set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
401 set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
402 set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
406 * "V" in ISA strings is ambiguous in practice: it should mean
407 * just the standard V-1.0 but vendors aren't well behaved.
408 * Many vendors with T-Head CPU cores which implement the 0.7.1
409 * version of the vector specification put "v" into their DTs.
410 * CPU cores with the ratified spec will contain non-zero
/* T-Head cores with marchid 0 implement pre-ratification vector; drop 'v'. */
413 if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
414 riscv_cached_marchid(cpu) == 0x0) {
415 this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
416 clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
420 * All "okay" hart should have same isa. Set HWCAP based on
421 * common capabilities of every "okay" hart, in case they don't
/* Intersect capabilities across harts (first hart seeds the sets). */
425 elf_hwcap &= this_hwcap;
427 elf_hwcap = this_hwcap;
429 if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
430 bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
432 bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
/* Balance the acpi_get_table() reference taken above. */
435 if (!acpi_disabled && rhct)
436 acpi_put_table((struct acpi_table_header *)rhct);
/*
 * Populate hart_isa[], riscv_isa and elf_hwcap from the newer
 * "riscv,isa-extensions" devicetree property (one string per extension),
 * instead of the legacy combined "riscv,isa" string.
 *
 * NOTE(review): block appears truncated by extraction; comments describe
 * only the visible logic.
 */
439 static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
443 for_each_possible_cpu(cpu) {
444 unsigned long this_hwcap = 0;
445 struct device_node *cpu_node;
446 struct riscv_isainfo *isainfo = &hart_isa[cpu];
448 cpu_node = of_cpu_device_node_get(cpu);
450 pr_warn("Unable to find cpu node\n");
/* Property absent on this hart: nothing to parse here. */
454 if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
455 of_node_put(cpu_node);
/* Set a bit for every known extension the property lists. */
459 for (int i = 0; i < riscv_isa_ext_count; i++) {
460 if (of_property_match_string(cpu_node, "riscv,isa-extensions",
461 riscv_isa_ext[i].property) < 0)
463 if (!riscv_isa_extension_check(riscv_isa_ext[i].id))
467 /* Only single letter extensions get set in hwcap */
468 if (strnlen(riscv_isa_ext[i].name, 2) == 1)
469 this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
471 set_bit(riscv_isa_ext[i].id, isainfo->isa);
474 of_node_put(cpu_node);
477 * All "okay" harts should have same isa. Set HWCAP based on
478 * common capabilities of every "okay" hart, in case they don't.
481 elf_hwcap &= this_hwcap;
483 elf_hwcap = this_hwcap;
485 if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
486 bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
488 bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
/* Nothing found anywhere: report failure so the caller can fall back. */
491 if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
497 #ifdef CONFIG_RISCV_ISA_FALLBACK
498 bool __initdata riscv_isa_fallback = true;
500 bool __initdata riscv_isa_fallback;
501 static int __init riscv_isa_fallback_setup(char *__unused)
503 riscv_isa_fallback = true;
506 early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
/*
 * Top-level boot-time HWCAP/ISA discovery: build the letter -> HWCAP lookup
 * table, fill per-hart and global ISA bitmaps from firmware data, apply
 * sanity constraints (F-without-D, vector support), and log the result.
 *
 * NOTE(review): block appears truncated by extraction; comments describe
 * only the visible logic.
 */
509 void __init riscv_fill_hwcap(void)
511 char print_str[NUM_ALPHA_EXTS + 1];
512 unsigned long isa2hwcap[26] = {0};
/* Only these single-letter extensions map to userspace HWCAP bits. */
515 isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
516 isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
517 isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
518 isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
519 isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
520 isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
521 isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;
/* ACPI systems only provide the ISA string; DT prefers the extension list. */
523 if (!acpi_disabled) {
524 riscv_fill_hwcap_from_isa_string(isa2hwcap);
526 int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);
528 if (ret && riscv_isa_fallback) {
529 pr_info("Falling back to deprecated \"riscv,isa\"\n");
530 riscv_fill_hwcap_from_isa_string(isa2hwcap);
535 * We don't support systems with F but without D, so mask those out
538 if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
539 pr_info("This kernel does not support systems with F but not D\n");
540 elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
543 if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
544 riscv_v_setup_vsize();
546 * ISA string in device tree might have 'v' flag, but
547 * CONFIG_RISCV_ISA_V is disabled in kernel.
548 * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
550 if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
551 elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
/* Log the base (single-letter) extensions common to all harts... */
554 memset(print_str, 0, sizeof(print_str));
555 for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
556 if (riscv_isa[0] & BIT_MASK(i))
557 print_str[j++] = (char)('a' + i);
558 pr_info("riscv: base ISA extensions %s\n", print_str);
/* ...and the subset actually exported to userspace via ELF HWCAP. */
560 memset(print_str, 0, sizeof(print_str));
561 for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
562 if (elf_hwcap & BIT_MASK(i))
563 print_str[j++] = (char)('a' + i);
564 pr_info("riscv: ELF capabilities %s\n", print_str);
567 unsigned long riscv_get_elf_hwcap(void)
571 hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));
573 if (!riscv_v_vstate_ctrl_user_allowed())
574 hwcap &= ~COMPAT_HWCAP_ISA_V;
/*
 * Benchmark misaligned memory access on the current CPU: time a word-wise
 * unaligned copy against a byte-wise copy over a fixed jiffy window and
 * record RISCV_HWPROBE_MISALIGNED_FAST/SLOW in misaligned_access_speed.
 *
 * NOTE(review): block appears truncated by extraction; comments describe
 * only the visible logic.
 */
579 static int check_unaligned_access(void *param)
581 int cpu = smp_processor_id();
582 u64 start_cycles, end_cycles;
586 unsigned long start_jiffies, now;
587 struct page *page = param;
590 long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
/* Firmware-emulated misaligned access: nothing meaningful to measure. */
592 if (check_unaligned_access_emulated(cpu))
595 /* Make an unaligned destination buffer. */
596 dst = (void *)((unsigned long)page_address(page) | 0x1);
597 /* Unalign src as well, but differently (off by 1 + 2 = 3). */
598 src = dst + (MISALIGNED_BUFFER_SIZE / 2);
/* Warm up, then align the measurement window to a jiffy boundary. */
602 __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
604 start_jiffies = jiffies;
605 while ((now = jiffies) == start_jiffies)
609 * For a fixed amount of time, repeatedly try the function, and take
610 * the best time in cycles as the measurement.
612 while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
613 start_cycles = get_cycles64();
614 /* Ensure the CSR read can't reorder WRT to the copy. */
616 __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
617 /* Ensure the copy ends before the end time is snapped. */
619 end_cycles = get_cycles64();
620 if ((end_cycles - start_cycles) < word_cycles)
621 word_cycles = end_cycles - start_cycles;
/* Repeat the same best-of measurement for the byte-wise copy. */
625 __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
626 start_jiffies = jiffies;
627 while ((now = jiffies) == start_jiffies)
630 while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
631 start_cycles = get_cycles64();
633 __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
635 end_cycles = get_cycles64();
636 if ((end_cycles - start_cycles) < byte_cycles)
637 byte_cycles = end_cycles - start_cycles;
642 /* Don't divide by zero. */
643 if (!word_cycles || !byte_cycles) {
644 pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
/* Word copy beating byte copy means hardware handles misalignment well. */
650 if (word_cycles < byte_cycles)
651 speed = RISCV_HWPROBE_MISALIGNED_FAST;
653 ratio = div_u64((byte_cycles * 100), word_cycles);
654 pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
658 (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
660 per_cpu(misaligned_access_speed, cpu) = speed;
/*
 * on_each_cpu() callback: run the misaligned-access benchmark on every CPU
 * except CPU 0, which stays behind to keep jiffies ticking and is measured
 * separately. @param is the per-CPU array of benchmark buffer pages.
 */
static void check_unaligned_access_nonboot_cpu(void *param)
{
	unsigned int cpu = smp_processor_id();
	struct page **pages = param;

	/* Reuse the cpu value read above rather than re-reading the CPU id. */
	if (cpu != 0)
		check_unaligned_access(pages[cpu]);
}
673 static int riscv_online_cpu(unsigned int cpu)
675 static struct page *buf;
677 /* We are already set since the last check */
678 if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
681 buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
683 pr_warn("Allocation failure, not measuring misaligned performance\n");
687 check_unaligned_access(buf);
688 __free_pages(buf, MISALIGNED_BUFFER_ORDER);
692 /* Measure unaligned access on all CPUs present at boot in parallel. */
/*
 * NOTE(review): block appears truncated by extraction (error/cleanup paths
 * are partially missing); comments describe only the visible logic.
 */
693 static int check_unaligned_access_all_cpus(void)
696 unsigned int cpu_count = num_possible_cpus();
697 struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
701 pr_warn("Allocation failure, not measuring misaligned performance\n");
706 * Allocate separate buffers for each CPU so there's no fighting over
709 for_each_cpu(cpu, cpu_online_mask) {
710 bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
712 pr_warn("Allocation failure, not measuring misaligned performance\n");
717 /* Check everybody except 0, who stays behind to tend jiffies. */
718 on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
/* Measure CPU 0 itself via a pinned cross-call. */
721 smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
723 /* Setup hotplug callback for any new CPUs that come online. */
724 cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
725 riscv_online_cpu, NULL);
/* Benchmark buffers are only needed during measurement; free them all. */
728 unaligned_emulation_finish();
729 for_each_cpu(cpu, cpu_online_mask) {
731 __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
/* Run once at arch initcall time, after SMP bring-up. */
738 arch_initcall(check_unaligned_access_all_cpus);
740 void riscv_user_isa_enable(void)
742 if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
743 csr_set(CSR_ENVCFG, ENVCFG_CBZE);
746 #ifdef CONFIG_RISCV_ALTERNATIVE
748 * Alternative patch sites consider 48 bits when determining when to patch
749 * the old instruction sequence with the new. These bits are broken into a
750 * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
751 * patch site is for an erratum, identified by the 32-bit patch ID. When
752 * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
753 * further break down patch ID into two 16-bit numbers. The lower 16 bits
754 * are the cpufeature ID and the upper 16 bits are used for a value specific
755 * to the cpufeature and patch site. If the upper 16 bits are zero, then it
756 * implies no specific value is specified. cpufeatures that want to control
757 * patching on a per-site basis will provide non-zero values and implement
758 * checks here. The checks return true when patching should be done, and
761 static bool riscv_cpufeature_patch_check(u16 id, u16 value)
767 case RISCV_ISA_EXT_ZICBOZ:
769 * Zicboz alternative applications provide the maximum
770 * supported block size order, or zero when it doesn't
771 * matter. If the current block size exceeds the maximum,
772 * then the alternative cannot be applied.
774 return riscv_cboz_block_size <= (1U << value);
/*
 * Walk the alternative patch table [begin, end) and apply every cpufeature
 * alternative (vendor_id == 0) whose extension is present on this system
 * and whose per-site value check passes.
 *
 * NOTE(review): block appears truncated by extraction (closing braces and
 * the trailing #endif are outside this view); comments describe only the
 * visible logic.
 */
780 void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
781 struct alt_entry *end,
784 struct alt_entry *alt;
785 void *oldptr, *altptr;
/* Too early to patch text safely; a later stage will do it. */
788 if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
791 for (alt = begin; alt < end; alt++) {
/* Non-zero vendor IDs are errata sites, handled elsewhere. */
792 if (alt->vendor_id != 0)
795 id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);
797 if (id >= RISCV_ISA_EXT_MAX) {
798 WARN(1, "This extension id:%d is not in ISA extension list", id);
/* Skip sites whose extension this system does not have. */
802 if (!__riscv_isa_extension_available(NULL, id))
805 value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
806 if (!riscv_cpufeature_patch_check(id, value))
809 oldptr = ALT_OLD_PTR(alt);
810 altptr = ALT_ALT_PTR(alt);
/* Serialize with other text modifications while patching. */
812 mutex_lock(&text_mutex);
813 patch_text_nosync(oldptr, altptr, alt->alt_len);
814 riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr);
815 mutex_unlock(&text_mutex);