/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>
#include <asm/epapr_hcalls.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
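/*
 * DBG() output goes through the low-level udbg backend rather than
 * printk(), so once udbg_early_init() has run it is usable well before
 * the regular console is registered.
 */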
int spinning_secondaries;
/* Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
	.dline_size = 0x40,
	.log_dline_size = 6,
	.iline_size = 0x40,
	.log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);
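/*
 * These 64-byte-line defaults are deliberately conservative;
 * initialize_cache_info() replaces them with the geometry read from the
 * device tree.
 */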
/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
static void setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 */
		if (smt_enabled_at_boot >= 2 &&
		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
		    book3e_htw_mode != PPC_HTW_E6500) {
			/* Should we panic instead? */
			WARN_ONCE(1, "%s: unsupported MMU configuration -- expect problems\n",
				  __func__);
		}
	}
}
#else
static void setup_tlb_core_data(void)
{
}
#endif
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#else
#define check_smt_enabled()
#endif /* CONFIG_SMP */
/* Fix up paca fields required for the boot cpu */
static void fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
}
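/*
 * With data_offset zeroed, early per-cpu accesses resolve to the initial
 * .data..percpu copy; the real offsets are installed later by
 * setup_per_cpu_areas().
 */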
static void cpu_ready_for_interrupts(void)
{
	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;

	/*
	 * Enable AIL if supported, and we are in hypervisor mode. If we are
	 * not in hypervisor mode, we enable relocation-on interrupts later
	 * in pSeries_setup_arch() using the H_SET_MODE hcall.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
	 * early asm init because at that point we haven't updated our
	 * CPU features from firmware and device-tree. Here we have,
	 * so let's do it.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
}
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to setup our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* Initialize lockdep early or else spinlocks will blow */
	lockdep_init();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	epapr_paravirt_early_init();

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();

	/* Probe the machine type */
	probe_machine();

	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/* Reserve large chunks of memory for use by CMA for KVM */
	kvm_cma_reserve();

	/*
	 * Reserve any gigantic pages requested on the command line.
	 * memblock needs to have been initialized by the time this is
	 * called since this will reserve memory.
	 */
	reserve_hugetlb_gpages();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
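/*
 * On return to head_64.S the MMU is turned on and execution continues
 * at start_here_common, which calls setup_system() below before
 * entering the generic start_kernel().
 */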
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
static bool use_spinloop(void)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
		return true;

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
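/*
 * Secondaries are parked in head_64.S polling __secondary_hold_spinloop;
 * storing a function entry point there sends them to
 * generic_secondary_smp_init(), which decrements spinning_secondaries
 * and then spins on its own paca->cpu_start flag.
 */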
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for_each_node_by_type(np, "cpu") {
		num_cpus += 1;

		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (num_cpus == 1) {
			const __be32 *sizep, *lsizep;
			u32 size, lsize;

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "d-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "d-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "i-cache-block-size",
						 NULL);
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "i-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	DBG(" <- initialize_cache_info()\n");
}
/*
 * Do some initial setup of the system. The parameters are those which
 * were passed in from the bootloader.
 */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/* Apply the CPU-specific and firmware-specific fixups to kernel
	 * text (nop out sections not relevant to this CPU or this firmware)
	 */
	do_feature_fixups(cur_cpu_spec->cpu_features,
			  &__start___ftr_fixup, &__stop___ftr_fixup);
	do_feature_fixups(cur_cpu_spec->mmu_features,
			  &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
	do_lwsync_fixups(cur_cpu_spec->cpu_features,
			 &__start___lwsync_fixup, &__stop___lwsync_fixup);
	do_final_fixups();

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree.
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform specific early initializations, which include
	 * setting up the hash table pointers. It also sets up some interrupt-mapping
	 * related options that will be used by finish_device_tree()
	 */
	if (ppc_md.init_early)
		ppc_md.init_early();

	/*
	 * We can discover serial ports now since the above did setup the
	 * hash table management for us, thus ioremap works. We do that early
	 * so that further code can be debugged
	 */
	find_legacy_serial_ports();

	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/*
	 * Initialize xmon
	 */
	xmon_setup();

	smp_setup_cpu_maps();
	check_smt_enabled();
	setup_tlb_core_data();

	/*
	 * Freescale Book3e parts spin in a loop provided by firmware,
	 * so smp_release_cpus() does nothing for them
	 */
#if defined(CONFIG_SMP)
	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();
#endif

	pr_info("Starting Linux %s %s\n", init_utsname()->machine,
		init_utsname()->version);

	pr_info("-----------------------------------------------------\n");
	pr_info("ppc64_pft_size    = 0x%llx\n", ppc64_pft_size);
	pr_info("phys_mem_size     = 0x%llx\n", memblock_phys_mem_size());

	if (ppc64_caches.dline_size != 0x80)
		pr_info("dcache_line_size  = 0x%x\n", ppc64_caches.dline_size);
	if (ppc64_caches.iline_size != 0x80)
		pr_info("icache_line_size  = 0x%x\n", ppc64_caches.iline_size);

	pr_info("cpu_features      = 0x%016lx\n", cur_cpu_spec->cpu_features);
	pr_info("  possible        = 0x%016lx\n", CPU_FTRS_POSSIBLE);
	pr_info("  always          = 0x%016lx\n", CPU_FTRS_ALWAYS);
	pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
		cur_cpu_spec->cpu_user_features2);
	pr_info("mmu_features      = 0x%08x\n", cur_cpu_spec->mmu_features);
	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);

#ifdef CONFIG_PPC_STD_MMU_64
	if (htab_address)
		pr_info("htab_address      = 0x%p\n", htab_address);

	pr_info("htab_hash_mask    = 0x%lx\n", htab_hash_mask);
#endif

	if (PHYSICAL_START > 0)
		pr_info("physical_start    = 0x%llx\n",
			(unsigned long long)PHYSICAL_START);
	pr_info("-----------------------------------------------------\n");

	DBG(" <- setup_system()\n");
}
/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
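/*
 * On Book3S this works out to 1TB (SID_SHIFT_1T == 40) when 1T segments
 * are available, or 256MB (SID_SHIFT == 28) with classic 256MB segments.
 */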
static void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
	}
}
#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#else
#define exc_lvl_early_init()
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
static void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		unsigned long sp;
		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		sp += THREAD_SIZE;
		paca[i].emergency_sp = __va(sp);

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		sp += THREAD_SIZE;
		paca[i].mc_emergency_sp = __va(sp);
#endif
	}
}
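/*
 * Stacks grow down, so each emergency stack pointer is set to the top
 * of its THREAD_SIZE allocation (hence sp += THREAD_SIZE above).
 */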
/*
 * Called from start_kernel, this initializes memblock, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	*cmdline_p = boot_command_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;
#ifdef CONFIG_PPC_64K_PAGES
	init_mm.context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(&init_mm.context);
#endif
	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

	initmem_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	setup_barrier_nospec();

	paging_init();

	/* Initialize the MMU context management stuff */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned */
	if ((unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);
}
#ifdef CONFIG_SMP
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif
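/*
 * Copying __per_cpu_offset[cpu] into paca->data_offset is what makes
 * per-cpu accessors work on ppc64: the offset in the paca is added to a
 * per-cpu variable's link-time address to locate this CPU's copy.
 */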
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
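/*
 * The returned period is a cycle count for the PMU: e.g. with
 * ppc_proc_freq at 4GHz and the default 10s watchdog threshold this
 * arms the counter for roughly 4 * 10^10 cycles between samples.
 */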
/*
 * The hardlockup detector breaks PMU event based branches and is likely
 * to get false positives in KVM guests, so disable it by default.
 */
static int __init disable_hardlockup_detector(void)
{
	hardlockup_detector_disable();

	return 0;
}
early_initcall(disable_hardlockup_detector);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
bool entry_flush;
bool uaccess_flush;
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);
static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.\n");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

static int __init handle_no_entry_flush(char *p)
{
	pr_info("entry-flush: disabled on command line.\n");
	no_entry_flush = true;
	return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);

static int __init handle_no_uaccess_flush(char *p)
{
	pr_info("uaccess-flush: disabled on command line.\n");
	no_uaccess_flush = true;
	return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);
/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);
static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}
void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}
void entry_flush_enable(bool enable)
{
	if (enable) {
		do_entry_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		do_entry_flush_fixups(L1D_FLUSH_NONE);
	}

	entry_flush = enable;
}
void uaccess_flush_enable(bool enable)
{
	if (enable) {
		do_uaccess_flush_fixups(enabled_flush_types);
		if (static_key_initialized)
			static_branch_enable(&uaccess_flush_key);
		else
			printk(KERN_DEBUG "uaccess-flush: deferring static key until after static key initialization\n");
		on_each_cpu(do_nothing, NULL, 1);
	} else {
		static_branch_disable(&uaccess_flush_key);
		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
	}

	uaccess_flush = enable;
}
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.dsize;
	limit = min(safe_stack_limit(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca[cpu].l1d_flush_size = l1d_size;
	}
}
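/*
 * The fallback is a "displacement flush": with no dedicated flush
 * instruction available, the exception-exit assembly reads this
 * 2x-L1D-sized area to displace every existing line in the L1D cache.
 */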
void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush && !cpu_mitigations_off())
		rfi_flush_enable(enable);
}
void setup_entry_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_entry_flush)
		entry_flush_enable(enable);
}
void setup_uaccess_flush(bool enable)
{
	if (cpu_mitigations_off())
		return;

	if (!no_uaccess_flush)
		uaccess_flush_enable(enable);
}
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
static int entry_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != entry_flush)
		entry_flush_enable(enable);

	return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
	*val = entry_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
static int uaccess_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != uaccess_flush)
		uaccess_flush_enable(enable);

	return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
	*val = uaccess_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
	debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
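/*
 * With debugfs mounted at /sys/kernel/debug, these files allow toggling
 * each mitigation at runtime, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/powerpc/rfi_flush
 *	echo 1 > /sys/kernel/debug/powerpc/rfi_flush
 */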
/*
 * setup_uaccess_flush runs before jump_label_init, so we can't do the setup
 * there. Do it now instead.
 */
static __init int uaccess_flush_static_key_init(void)
{
	if (uaccess_flush) {
		printk(KERN_DEBUG "uaccess-flush: switching on static key\n");
		static_branch_enable(&uaccess_flush_key);
	}

	return 0;
}
early_initcall(uaccess_flush_static_key_init);
#endif /* CONFIG_PPC_BOOK3S_64 */