/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
int spinning_secondaries;
/* Fallback cache geometry, used until the device-tree has been parsed */
struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);
			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */
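/*
 * Usage example (illustrative): booting with "smt-enabled=2" on an SMT8
 * core gives smt_enabled_at_boot = min(threads_per_core, 2) = 2;
 * "smt-enabled=off" gives 0, and an absent option falls back to the
 * ibm,smt-enabled device-tree property.
 */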
/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
}
static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}
static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
	 * early asm init because at that point we haven't updated our
	 * CPU features from firmware and device-tree. Here we have,
	 * so let's do it.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
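/*
 * For reference: LPCR_AIL_3 sets LPCR[AIL] = 3, i.e. interrupts are taken
 * with relocation on, vectored at effective address 0xc000000000004000
 * plus the usual vector offset (per the ISA's Alternate Interrupt
 * Location description).
 */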
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended: be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
		return true;

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
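/*
 * A sketch of the release protocol (as implemented in head_64.S): the
 * secondaries spin in real mode polling __secondary_hold_spinloop, so we
 * write the entry point of generic_secondary_smp_init through the
 * spinloop's physical address (hence the -PHYSICAL_START adjustment
 * above); once the word becomes non-zero, each secondary branches to it.
 */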
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}
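/*
 * Worked example of the associativity math in init_cache_info(): a
 * 32 KiB cache with 64 B lines and 64 sets gives
 * assoc = 32768 / (64 * 64) = 8, i.e. 8-way set-associative; sets == 0
 * (fully associative after the OF fixup above) is reported as 0xffff.
 */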
void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize  blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,  128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000, 128, 128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000, 128, 0,   512);
		init_cache_info(&ppc64_caches.l3, 0x800000, 128, 0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}
/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
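/*
 * For reference: on Book3S hash, SID_SHIFT is 28 (a 256 MB segment) and
 * SID_SHIFT_1T is 40 (a 1 TB segment), so the returned limit is the size
 * of the first, bolted segment in either segment size.
 */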
void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
	}
}
#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
/*
 * Emergency stacks are used for a range of things, from asynchronous
 * NMIs (system reset, machine check) to synchronous, process context.
 * We set preempt_count to zero, even though that isn't necessarily correct. To
 * get the right value we'd need to copy it from the previous thread_info, but
 * doing that might fault causing more problems.
 * TODO: what to do with accounting?
 */
static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
{
	ti->task = NULL;
	ti->cpu = cpu;
	ti->preempt_count = 0;
	ti->local_flags = 0;
	ti->flags = 0;
	klp_init_thread_info(ti);
}
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;

		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif
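/*
 * On ppc64, per-cpu accesses resolve through the PACA: paca[cpu].data_offset
 * mirrors __per_cpu_offset[cpu], so per_cpu(var, cpu) is roughly
 * *(&var + __per_cpu_offset[cpu]), with r13 (the PACA pointer) supplying
 * the offset for this-cpu accesses.
 */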
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif
/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);
#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
bool entry_flush;
bool uaccess_flush;
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);
static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);
static int __init handle_no_entry_flush(char *p)
{
	pr_info("entry-flush: disabled on command line.");
	no_entry_flush = true;
	return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);
static int __init handle_no_uaccess_flush(char *p)
{
	pr_info("uaccess-flush: disabled on command line.");
	no_uaccess_flush = true;
	return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);
/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);
static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}
void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}
void entry_flush_enable(bool enable)
{
	if (enable) {
		do_entry_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		do_entry_flush_fixups(L1D_FLUSH_NONE);
	}

	entry_flush = enable;
}
void uaccess_flush_enable(bool enable)
{
	if (enable) {
		do_uaccess_flush_fixups(enabled_flush_types);
		static_branch_enable(&uaccess_flush_key);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		static_branch_disable(&uaccess_flush_key);
		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
	}

	uaccess_flush = enable;
}
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;
	limit = min(safe_stack_limit(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca[cpu].l1d_flush_size = l1d_size;
	}
}
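/*
 * Sketch of the fallback "displacement" flush: on kernel exit the asm
 * handlers (the rfi_flush_fallback code in the exception vectors) load
 * through this 2x-L1D, L1D-aligned buffer so every set and way of the L1D
 * is refilled with benign data; the 2x sizing guards against prefetch
 * runoff, as the comment above notes.
 */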
void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!cpu_mitigations_off() && !no_rfi_flush)
		rfi_flush_enable(enable);
}
void setup_entry_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_entry_flush)
		entry_flush_enable(enable);
}
void setup_uaccess_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_uaccess_flush)
		uaccess_flush_enable(enable);
}
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
static int entry_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != entry_flush)
		entry_flush_enable(enable);

	return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
	*val = entry_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
static int uaccess_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != uaccess_flush)
		uaccess_flush_enable(enable);

	return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
	*val = uaccess_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
	debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */
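/*
 * Runtime toggle example (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo 0 > /sys/kernel/debug/powerpc/rfi_flush   # disable via rfi_flush_set()
 *   echo 1 > /sys/kernel/debug/powerpc/rfi_flush   # re-enable
 *
 * entry_flush and uaccess_flush work the same way.
 */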