// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <asm/debugfs.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
/* Cache block defaults, overridden by initialize_cache_info() below */
struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);
			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}
/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);
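/*
 * For example: booting with "smt-enabled=off" leaves smt_enabled_at_boot
 * at 0, while a numeric value such as "smt-enabled=4" is clamped to
 * min(threads_per_core, smt), so an SMT8 core would start 4 threads.
 */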
#endif /* CONFIG_SMP */
/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}
static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}
static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
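/*
 * Note: LPCR[AIL] = 3 above requests that interrupts be taken with the
 * MMU on, at the alternate (relocated) vector location, which is what
 * makes it safe for kernel_msr to carry MSR_KERNEL's IR/DR bits.
 */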
unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended: be very careful because, for example,
 * the device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/*
	 * Assume we're on cpu 0 for now.
	 *
	 * We need to load a PACA very early for a few reasons.
	 *
	 * The stack protector canary is stored in the paca, so as soon as we
	 * call any stack protected code we need r13 pointing somewhere valid.
	 *
	 * If we are using kcov it will call in_task() in its instrumentation,
	 * which relies on the current task from the PACA.
	 *
	 * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
	 * printk(), which can trigger both stack protector and kcov.
	 *
	 * percpu variables and spin locks also use the paca.
	 *
	 * So set up a temporary paca. It will be replaced below once we know
	 * what CPU we are on.
	 */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen before
	 * feature fixups for platforms that implement this using features.
	 */
	setup_kup();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */
void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */
static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}
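/*
 * Worked example: a 32KB cache (size = 0x8000) with 128-byte lines and
 * 64 sets gives assoc = 0x8000 / (64 * 128) = 4, i.e. 4-way set
 * associative. sets == 0 (fully associative, see parse_cache_info()
 * below) is reported with the sentinel value 0xffff.
 */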
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep == NULL)
		lsizep = bsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}
void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize   blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}
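/*
 * The dcache_bsize/icache_bsize values recorded above are what binfmt_elf
 * exports to userland via the AT_DCACHEBSIZE/AT_ICACHEBSIZE auxiliary
 * vector entries, so cache-block-sized loops in userspace (e.g. memory
 * copy and cache flush helpers) can pick them up at process start.
 */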
/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers and must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
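/*
 * For example, on Book3S hash with 1T segments this is 1UL << 40 (1TB);
 * with only 256MB segments it is 1UL << 28 = 256MB, which is why the
 * emergency stacks allocated below must sit under 256MB on such systems.
 */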
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}
void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}
#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
#endif
	}
}
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif /* CONFIG_SMP */
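/*
 * After setup_per_cpu_areas() runs, cpu N's per-cpu variables live at
 * __per_cpu_start + __per_cpu_offset[N]; stashing that offset in the paca
 * lets the per-cpu accessors reach it through r13 (local_paca) even in
 * early interrupt handlers.
 */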
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif
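/*
 * ppc_proc_freq is in Hz and watchdog_thresh in seconds, so the sample
 * period above is in processor cycles: e.g. a 3 GHz core with the default
 * 10s threshold yields a period of 3 * 10^10 cycles.
 */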
/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);
#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
bool entry_flush;
bool uaccess_flush;
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);
static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);
static int __init handle_no_entry_flush(char *p)
{
	pr_info("entry-flush: disabled on command line.");
	no_entry_flush = true;
	return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);
static int __init handle_no_uaccess_flush(char *p)
{
	pr_info("uaccess-flush: disabled on command line.");
	no_uaccess_flush = true;
	return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);
/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);
static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}
void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}
static void entry_flush_enable(bool enable)
{
	if (enable) {
		do_entry_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		do_entry_flush_fixups(L1D_FLUSH_NONE);
	}

	entry_flush = enable;
}
static void uaccess_flush_enable(bool enable)
{
	if (enable) {
		do_uaccess_flush_fixups(enabled_flush_types);
		static_branch_enable(&uaccess_flush_key);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		static_branch_disable(&uaccess_flush_key);
		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
	}

	uaccess_flush = enable;
}
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}
void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!cpu_mitigations_off() && !no_rfi_flush)
		rfi_flush_enable(enable);
}
void setup_entry_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_entry_flush)
		entry_flush_enable(enable);
}
void setup_uaccess_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_uaccess_flush)
		uaccess_flush_enable(enable);
}
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
static int entry_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != entry_flush)
		entry_flush_enable(enable);

	return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
	*val = entry_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
static int uaccess_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != uaccess_flush)
		uaccess_flush_enable(enable);

	return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
	*val = uaccess_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
	debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
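/*
 * Runtime usage sketch, assuming debugfs is mounted at /sys/kernel/debug
 * (powerpc_debugfs_root is the "powerpc" directory there):
 *
 *   echo 0 > /sys/kernel/debug/powerpc/rfi_flush    # disable the RFI flush
 *   cat /sys/kernel/debug/powerpc/entry_flush       # read current state
 */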
#endif /* CONFIG_PPC_BOOK3S_64 */