/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/debug.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
/*
 * Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
        .dline_size = 0x40,
        .log_dline_size = 6,
        .iline_size = 0x40,
        .log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);
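
/*
 * A 0x40 (64-byte) line with a log2 of 6 is a safe default for the
 * 64-bit cores this file supports; the real geometry is read from the
 * device tree in initialize_cache_info() below.
 */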
/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
        int cpu;

        BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

        for_each_possible_cpu(cpu) {
                int first = cpu_first_thread_sibling(cpu);

                /*
                 * If we boot via kdump on a non-primary thread,
                 * make sure we point at the thread that actually
                 * set up this TLB.
                 */
                if (cpu_first_thread_sibling(boot_cpuid) == first)
                        first = boot_cpuid;

                paca[cpu].tcd_ptr = &paca[first].tcd;

                /*
                 * If we have threads, we need either tlbsrx.
                 * or e6500 tablewalk mode, or else TLB handlers
                 * will be racy and could produce duplicate entries.
                 */
                if (smt_enabled_at_boot >= 2 &&
                    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
                    book3e_htw_mode != PPC_HTW_E6500) {
                        /* Should we panic instead? */
                        WARN_ONCE(1, "%s: unsupported MMU configuration -- expect problems\n",
                                  __func__);
                }
        }
}
#endif
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
        struct device_node *dn;
        const char *smt_option;

        /* Default to enabling all threads */
        smt_enabled_at_boot = threads_per_core;

        /* Allow the command line to overrule the OF option */
        if (smt_enabled_cmdline) {
                if (!strcmp(smt_enabled_cmdline, "on"))
                        smt_enabled_at_boot = threads_per_core;
                else if (!strcmp(smt_enabled_cmdline, "off"))
                        smt_enabled_at_boot = 0;
                else {
                        int smt;
                        int rc;

                        rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
                        if (!rc)
                                smt_enabled_at_boot =
                                        min(threads_per_core, smt);
                }
        } else {
                dn = of_find_node_by_path("/options");
                if (dn) {
                        smt_option = of_get_property(dn, "ibm,smt-enabled",
                                                     NULL);
                        if (smt_option) {
                                if (!strcmp(smt_option, "on"))
                                        smt_enabled_at_boot = threads_per_core;
                                else if (!strcmp(smt_option, "off"))
                                        smt_enabled_at_boot = 0;
                        }

                        of_node_put(dn);
                }
        }
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
        smt_enabled_cmdline = p;
        return 0;
}
early_param("smt-enabled", early_smt_enabled);
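
/*
 * Example: on a core with threads_per_core == 8, "smt-enabled=4"
 * yields smt_enabled_at_boot = min(8, 4) = 4, "smt-enabled=16" is
 * clamped to 8, and "on"/"off" select 8 and 0 respectively.
 */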
#endif /* CONFIG_SMP */
/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
        /* The boot cpu is started */
        get_paca()->cpu_start = 1;
        /* Allow percpu accesses to work until we setup percpu data */
        get_paca()->data_offset = 0;
}
static void __init configure_exceptions(void)
{
        /*
         * Setup the trampolines from the lowmem exception vectors
         * to the kdump kernel when not using a relocatable kernel.
         */
        setup_kdump_trampoline();

        /* Under a PAPR hypervisor, we need hypercalls */
        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                /* Enable AIL if possible */
                pseries_enable_reloc_on_exc();

                /*
                 * Tell the hypervisor that we want our exceptions to
                 * be taken in little endian mode.
                 *
                 * We don't call this for big endian as our calling convention
                 * makes us always enter in BE, and the call may fail under
                 * some circumstances with kdump.
                 */
#ifdef __LITTLE_ENDIAN__
                pseries_little_endian_exceptions();
#endif
        } else {
                /* Set endian mode using OPAL */
                if (firmware_has_feature(FW_FEATURE_OPAL))
                        opal_configure_cores();

                /* AIL on native is done in cpu_ready_for_interrupts() */
        }
}
static void cpu_ready_for_interrupts(void)
{
        /*
         * Enable AIL if supported, and we are in hypervisor mode. This
         * is called once for every processor.
         *
         * If we are not in hypervisor mode the job is done once for
         * the whole partition in configure_exceptions().
         */
        if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
            early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
                unsigned long lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
        }

        /*
         * Fixup HFSCR:TM based on CPU features. The bit is set by our
         * early asm init because at that point we haven't updated our
         * CPU features from firmware and device-tree. Here we have,
         * so let's do it.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
                mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);

        /* Set IR and DR in PACA MSR */
        get_paca()->kernel_msr = MSR_KERNEL;
}
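
/*
 * For reference: LPCR[AIL] = 3 ("Alternate Interrupt Location") makes
 * the core deliver interrupts with relocation on, at the
 * 0xc000000000004000 alias of the handlers, avoiding an rfid
 * round-trip through real mode on every exception.
 */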
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
        static __initdata struct paca_struct boot_paca;

        /* -------- printk is _NOT_ safe to use here ! ------- */

        /* Identify CPU type */
        identify_cpu(0, mfspr(SPRN_PVR));

        /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
        initialise_paca(&boot_paca, 0);
        setup_paca(&boot_paca);
        fixup_boot_paca();

        /* -------- printk is now safe to use ------- */

        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();

        DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

        /*
         * Do early initialization using the flattened device
         * tree, such as retrieving the physical memory map or
         * calculating/retrieving the hash table size.
         */
        early_init_devtree(__va(dt_ptr));

        /* Now we know the logical id of our boot cpu, setup the paca. */
        setup_paca(&paca[boot_cpuid]);
        fixup_boot_paca();

        /*
         * Configure exception handlers. This includes setting up trampolines
         * if needed, setting exception endian mode, etc...
         */
        configure_exceptions();

        /* Apply all the dynamic patching */
        apply_feature_fixups();
        setup_feature_keys();

        /* Initialize the hash table or TLB handling */
        early_init_mmu();

        /*
         * At this point, we can let interrupts switch to virtual mode
         * (the MMU has been setup), so adjust the MSR in the PACA to
         * have IR and DR set and enable AIL if it exists.
         */
        cpu_ready_for_interrupts();

        DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
        /*
         * This needs to be done *last* (after the above DBG() even).
         *
         * Right after we return from this function, we turn on the MMU,
         * which means the real-mode access trick that btext does will
         * no longer work; it needs to switch to using a real MMU
         * mapping. This call will ensure that it does.
         */
        btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
        /* Mark interrupts disabled in PACA */
        get_paca()->soft_enabled = 0;

        /* Initialize the hash table or TLB handling */
        early_init_mmu_secondary();

        /*
         * At this point, we can let interrupts switch to virtual mode
         * (the MMU has been setup), so adjust the MSR in the PACA to
         * have IR and DR set.
         */
        cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
static bool use_spinloop(void)
{
        if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
                return true;

        /*
         * When book3e boots from kexec, the ePAPR spin table does
         * not get used.
         */
        return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
        unsigned long *ptr;
        int i;

        if (!use_spinloop())
                return;

        DBG(" -> smp_release_cpus()\n");

        /*
         * All secondary cpus are spinning on a common spinloop, release them
         * all now so they can start to spin on their individual paca
         * spinloops. For non SMP kernels, the secondary cpus never get out
         * of the common spinloop.
         */
        ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
                        - PHYSICAL_START);
        *ptr = ppc_function_entry(generic_secondary_smp_init);

        /* And wait a bit for them to catch up */
        for (i = 0; i < 100000; i++) {
                mb();
                HMT_low();
                if (spinning_secondaries == 0)
                        break;
                udelay(1);
        }
        DBG("spinning_secondaries = %d\n", spinning_secondaries);

        DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */
void __init initialize_cache_info(void)
{
        struct device_node *np;
        unsigned long num_cpus = 0;

        DBG(" -> initialize_cache_info()\n");

        for_each_node_by_type(np, "cpu") {
                num_cpus += 1;

                /*
                 * We're assuming *all* of the CPUs have the same
                 * d-cache and i-cache sizes... -Peter
                 */
                if (num_cpus == 1) {
                        const __be32 *sizep, *lsizep;
                        u32 size, lsize;

                        size = 0;
                        lsize = cur_cpu_spec->dcache_bsize;
                        sizep = of_get_property(np, "d-cache-size", NULL);
                        if (sizep != NULL)
                                size = be32_to_cpu(*sizep);
                        lsizep = of_get_property(np, "d-cache-block-size",
                                                 NULL);
                        /* fallback if block size missing */
                        if (lsizep == NULL)
                                lsizep = of_get_property(np,
                                                         "d-cache-line-size",
                                                         NULL);
                        if (lsizep != NULL)
                                lsize = be32_to_cpu(*lsizep);
                        if (sizep == NULL || lsizep == NULL)
                                DBG("Argh, can't find dcache properties ! "
                                    "sizep: %p, lsizep: %p\n", sizep, lsizep);

                        ppc64_caches.dsize = size;
                        ppc64_caches.dline_size = lsize;
                        ppc64_caches.log_dline_size = __ilog2(lsize);
                        ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

                        size = 0;
                        lsize = cur_cpu_spec->icache_bsize;
                        sizep = of_get_property(np, "i-cache-size", NULL);
                        if (sizep != NULL)
                                size = be32_to_cpu(*sizep);
                        lsizep = of_get_property(np, "i-cache-block-size",
                                                 NULL);
                        if (lsizep == NULL)
                                lsizep = of_get_property(np,
                                                         "i-cache-line-size",
                                                         NULL);
                        if (lsizep != NULL)
                                lsize = be32_to_cpu(*lsizep);
                        if (sizep == NULL || lsizep == NULL)
                                DBG("Argh, can't find icache properties ! "
                                    "sizep: %p, lsizep: %p\n", sizep, lsizep);

                        ppc64_caches.isize = size;
                        ppc64_caches.iline_size = lsize;
                        ppc64_caches.log_iline_size = __ilog2(lsize);
                        ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
                }
        }

        /* For use by binfmt_elf */
        dcache_bsize = ppc64_caches.dline_size;
        icache_bsize = ppc64_caches.iline_size;

        DBG(" <- initialize_cache_info()\n");
}
/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
        /* Freescale BookE bolts the entire linear mapping */
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
                return linear_map_top;
        /* Other BookE, we assume the first GB is bolted */
        return 1ul << 30;
#else
        /* BookS, the first segment is bolted */
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                return 1UL << SID_SHIFT_1T;
        return 1UL << SID_SHIFT;
#endif
}
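
/*
 * Worked example: with 256MB segments (SID_SHIFT == 28) this returns
 * 0x10000000, and with 1T segments (SID_SHIFT_1T == 40) it returns 1TB;
 * non-Freescale Book3E gets the 1GB (1ul << 30) assumption above.
 */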
void __init irqstack_early_init(void)
{
        u64 limit = safe_stack_limit();
        unsigned int i;

        /*
         * Interrupt stacks must be in the first segment since we
         * cannot afford to take SLB misses on them.
         */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
                        __va(memblock_alloc_base(THREAD_SIZE,
                                                 THREAD_SIZE, limit));
                hardirq_ctx[i] = (struct thread_info *)
                        __va(memblock_alloc_base(THREAD_SIZE,
                                                 THREAD_SIZE, limit));
        }
}
#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
        unsigned int i;
        unsigned long sp;

        for_each_possible_cpu(i) {
                sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
                critirq_ctx[i] = (struct thread_info *)__va(sp);
                paca[i].crit_kstack = __va(sp + THREAD_SIZE);

                sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
                dbgirq_ctx[i] = (struct thread_info *)__va(sp);
                paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

                sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
                mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
                paca[i].mc_kstack = __va(sp + THREAD_SIZE);
        }

        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
                patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
        u64 limit;
        unsigned int i;

        /*
         * Emergency stacks must be under 256MB, we cannot afford to take
         * SLB misses on them. The ABI also requires them to be 128-byte
         * aligned.
         *
         * Since we use these as temporary stacks during secondary CPU
         * bringup, we need to get at them in real mode. This means they
         * must also be within the RMO region.
         */
        limit = min(safe_stack_limit(), ppc64_rma_size);

        for_each_possible_cpu(i) {
                struct thread_info *ti;

                ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
                klp_init_thread_info(ti);
                paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
                /* emergency stack for machine check exception handling. */
                ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
                klp_init_thread_info(ti);
                paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
        }
}
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE        ()
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
        return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
                                    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
        free_bootmem(__pa(ptr), size);
}
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
        if (cpu_to_node(from) == cpu_to_node(to))
                return LOCAL_DISTANCE;

        return REMOTE_DISTANCE;
}
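
/*
 * pcpu_embed_first_chunk() feeds pairs of CPUs through this callback;
 * CPUs that report LOCAL_DISTANCE to each other (same node via
 * cpu_to_node()) are grouped so their per-cpu areas are allocated from
 * node-local memory.
 */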
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
        const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
        size_t atom_size;
        unsigned long delta;
        unsigned int cpu;
        int rc;

        /*
         * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
         * to group units. For larger mappings, use 1M atom which
         * should be large enough to contain a number of units.
         */
        if (mmu_linear_psize == MMU_PAGE_4K)
                atom_size = PAGE_SIZE;
        else
                atom_size = 1 << 20;

        rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
                                    pcpu_fc_alloc, pcpu_fc_free);
        if (rc < 0)
                panic("cannot initialize percpu area (err=%d)", rc);

        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
                paca[cpu].data_offset = __per_cpu_offset[cpu];
        }
}
#endif /* CONFIG_SMP */
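
/*
 * From here on, a per-cpu access on CPU n resolves to
 * __per_cpu_start + __per_cpu_offset[n]; mirroring the offset into
 * paca->data_offset lets ppc64 compute __my_cpu_offset from r13 (the
 * paca pointer) without touching the offset array.
 */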
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
        if (ppc_md.memory_block_size)
                return ppc_md.memory_block_size();

        return MIN_MEMORY_BLOCK_SIZE;
}
#endif
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
        return ppc_proc_freq * watchdog_thresh;
}
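
/*
 * watchdog_thresh is in seconds and ppc_proc_freq in Hz, so with e.g.
 * a 3.8 GHz core and the default threshold of 10s the PMU sample
 * period comes out to 38e9 processor cycles.
 */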
/*
 * The hardlockup detector breaks PMU event based branches and is likely
 * to get false positives in KVM guests, so disable it by default.
 */
static int __init disable_hardlockup_detector(void)
{
        hardlockup_detector_disable();

        return 0;
}
early_initcall(disable_hardlockup_detector);
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
bool entry_flush;
bool uaccess_flush;

DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);
static int __init handle_no_rfi_flush(char *p)
{
        pr_info("rfi-flush: disabled on command line.\n");
        no_rfi_flush = true;
        return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);
static int __init handle_no_entry_flush(char *p)
{
        pr_info("entry-flush: disabled on command line.\n");
        no_entry_flush = true;
        return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);
static int __init handle_no_uaccess_flush(char *p)
{
        pr_info("uaccess-flush: disabled on command line.\n");
        no_uaccess_flush = true;
        return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);
/*
 * The RFI flush is not KPTI, but because users will see documentation
 * that says to use nopti we hijack that option here to also disable
 * the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
        pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
        handle_no_rfi_flush(NULL);
        return 0;
}
early_param("nopti", handle_no_pti);
static void do_nothing(void *unused)
{
        /*
         * We don't need to do the flush explicitly; just entering and
         * exiting the kernel is sufficient, as the RFI exit handlers
         * will do the right thing.
         */
}
void rfi_flush_enable(bool enable)
{
        if (enable) {
                do_rfi_flush_fixups(enabled_flush_types);
                on_each_cpu(do_nothing, NULL, 1);
        } else {
                do_rfi_flush_fixups(L1D_FLUSH_NONE);
        }

        rfi_flush = enable;
}
void entry_flush_enable(bool enable)
{
        if (enable) {
                do_entry_flush_fixups(enabled_flush_types);
                on_each_cpu(do_nothing, NULL, 1);
        } else {
                do_entry_flush_fixups(L1D_FLUSH_NONE);
        }

        entry_flush = enable;
}
void uaccess_flush_enable(bool enable)
{
        if (enable) {
                do_uaccess_flush_fixups(enabled_flush_types);
                static_branch_enable(&uaccess_flush_key);
                on_each_cpu(do_nothing, NULL, 1);
        } else {
                static_branch_disable(&uaccess_flush_key);
                do_uaccess_flush_fixups(L1D_FLUSH_NONE);
        }

        uaccess_flush = enable;
}
static void __ref init_fallback_flush(void)
{
        u64 l1d_size, limit;
        int cpu;

        /* Only allocate the fallback flush area once (at boot time). */
        if (l1d_flush_fallback_area)
                return;

        l1d_size = ppc64_caches.dsize;
        limit = min(safe_stack_limit(), ppc64_rma_size);

        /*
         * Align to L1d size, and size it at 2x L1d size, to catch possible
         * hardware prefetch runoff. We don't have a recipe for load patterns
         * to reliably avoid the prefetcher.
         */
        l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2,
                                                           l1d_size, limit));
        memset(l1d_flush_fallback_area, 0, l1d_size * 2);

        for_each_possible_cpu(cpu) {
                paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
                paca[cpu].l1d_flush_size = l1d_size;
        }
}
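
/*
 * Sizing example: a 64K L1d gives a 128K fallback area aligned to 64K;
 * the fallback handler reads through the whole area to displace every
 * line of the L1d when no dedicated flush instruction is available.
 */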
void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
        if (types & L1D_FLUSH_FALLBACK) {
                pr_info("rfi-flush: fallback displacement flush available\n");
                init_fallback_flush();
        }

        if (types & L1D_FLUSH_ORI)
                pr_info("rfi-flush: ori type flush available\n");

        if (types & L1D_FLUSH_MTTRIG)
                pr_info("rfi-flush: mttrig type flush available\n");

        enabled_flush_types = types;

        if (!no_rfi_flush && !cpu_mitigations_off())
                rfi_flush_enable(enable);
}
void setup_entry_flush(bool enable)
{
        if (cpu_mitigations_off())
                return;

        if (!no_entry_flush)
                entry_flush_enable(enable);
}
void setup_uaccess_flush(bool enable)
{
        if (cpu_mitigations_off())
                return;

        if (!no_uaccess_flush)
                uaccess_flush_enable(enable);
}
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != rfi_flush)
                rfi_flush_enable(enable);

        return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
        *val = rfi_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
static int entry_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != entry_flush)
                entry_flush_enable(enable);

        return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
        *val = entry_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
static int uaccess_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != uaccess_flush)
                uaccess_flush_enable(enable);

        return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
        *val = uaccess_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
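
/*
 * The debugfs files below allow flipping each mitigation at runtime,
 * e.g. (with debugfs mounted in the usual place):
 *
 *   echo 0 > /sys/kernel/debug/powerpc/rfi_flush
 *   cat /sys/kernel/debug/powerpc/entry_flush
 */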
static __init int rfi_flush_debugfs_init(void)
{
        debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
        debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
        debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
        return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */