// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#undef DEBUG
#include <linux/export.h>
#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/seq_buf.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/archrandom.h>
#include <asm/fadump.h>
#include <asm/udbg.h>
#include <asm/hugetlb.h>
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>
#include <asm/mce.h>

#include "setup.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
/* The main machine-dep calls structure */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);

#ifdef CONFIG_PPC64
int boot_cpu_hwid = -1;
#endif
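/*
 * boot_cpuid is the logical number of the CPU the kernel booted on;
 * boot_cpu_hwid (64-bit only) records that CPU's hardware id as read from
 * the device tree during early boot.
 */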
/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);
#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif
#ifdef CONFIG_CRASH_CORE
/* This keeps track of which cpu is the crashing one. */
int crashing_cpu = -1;
#endif
/* also used by kexec */
void machine_shutdown(void)
{
	/*
	 * If fadump is active, clean up the fadump registration before we
	 * shut down.
	 */
	fadump_cleanup();

	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}
static void machine_hang(void)
{
	pr_emerg("System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1)
		;
}
void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);

	smp_send_stop();

	do_kernel_restart(cmd);
	mdelay(1000);

	machine_hang();
}
void machine_power_off(void)
{
	machine_shutdown();
	do_kernel_power_off();
	smp_send_stop();
	machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
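/*
 * Back end for the arch_get_random_seed_longs() interface: the platform
 * hook ppc_md.get_random_seed fills in a single long, so this reports at
 * most one long per call, regardless of max_longs.
 */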
size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
	if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
		return 1;

	return 0;
}
EXPORT_SYMBOL(arch_get_random_seed_longs);
void machine_halt(void)
{
	machine_shutdown();
	if (ppc_md.halt)
		ppc_md.halt();

	smp_send_stop();
	machine_hang();
}
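/*
 * PVR value of each online CPU, recorded at CPU bring-up so that
 * show_cpuinfo() can report it without a cross-CPU call.
 */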
#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif
static void show_cpuinfo_summary(struct seq_file *m)
{
	struct device_node *root;
	const char *model = NULL;
	unsigned long bogosum = 0;
	int i;

	if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
		for_each_online_cpu(i)
			bogosum += loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
	}
	seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
	if (ppc_md.name)
		seq_printf(m, "platform\t: %s\n", ppc_md.name);
	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	if (model)
		seq_printf(m, "model\t\t: %s\n", model);
	of_node_put(root);

	if (ppc_md.show_cpuinfo != NULL)
		ppc_md.show_cpuinfo(m);

	/* Display the amount of memory */
	if (IS_ENABLED(CONFIG_PPC32))
		seq_printf(m, "Memory\t\t: %d MB\n",
			   (unsigned int)(total_memory / (1024 * 1024)));
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned long proc_freq;
	unsigned short maj;
	unsigned short min;

#ifdef CONFIG_SMP
	pvr = per_cpu(cpu_pvr, cpu_id);
#else
	pvr = mfspr(SPRN_PVR);
#endif
	maj = (pvr >> 8) & 0xFF;
	min = pvr & 0xFF;

	seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id);

	if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
		seq_puts(m, cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		seq_puts(m, ", altivec supported");

	seq_putc(m, '\n');
#ifdef CONFIG_TAU
	if (cpu_has_feature(CPU_FTR_TAU)) {
		if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
			/* more straightforward, but potentially misleading */
			seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
				   cpu_temp(cpu_id));
		} else {
			/* show the actual temp sensor range */
			u32 temp;

			temp = cpu_temp_both(cpu_id);
			seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
				   temp & 0xff, temp >> 16);
		}
	}
#endif /* CONFIG_TAU */
	/*
	 * Platforms that have variable clock rates should implement
	 * the ppc_md.get_proc_freq() method, which reports the clock
	 * rate of a given cpu. The rest can use ppc_proc_freq to
	 * report the clock rate that is the same across all cpus.
	 */
	if (ppc_md.get_proc_freq)
		proc_freq = ppc_md.get_proc_freq(cpu_id);
	else
		proc_freq = ppc_proc_freq;

	if (proc_freq)
		seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
			   proc_freq / 1000000, proc_freq % 1000000);
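	/*
	 * PVR layout: the upper 16 bits (PVR_VER) identify the processor
	 * family and the lower 16 bits (PVR_REV) its revision; how maj/min
	 * are decoded from them varies by family, as handled below.
	 */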
	/* If we are a Freescale core do a simple check so
	 * we don't have to keep adding cases in the future */
	if (PVR_VER(pvr) & 0x8000) {
		switch (PVR_VER(pvr)) {
		case 0x8000:	/* 7441/7450/7451, Voyager */
		case 0x8001:	/* 7445/7455, Apollo 6 */
		case 0x8002:	/* 7447/7457, Apollo 7 */
		case 0x8003:	/* 7447A, Apollo 7 PM */
		case 0x8004:	/* 7448, Apollo 8 */
		case 0x800c:	/* 7410, Nitro */
			maj = ((pvr >> 8) & 0xF);
			min = PVR_MIN(pvr);
			break;
		default:	/* e500/book-e */
			maj = PVR_MAJ(pvr);
			min = PVR_MIN(pvr);
			break;
		}
	} else {
		switch (PVR_VER(pvr)) {
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = PVR_MIN(pvr);
			break;
		case 0x004e:	/* POWER9 bits 12-15 give chip type */
		case 0x0080:	/* POWER10 bit 12 gives SMT8/4 */
			maj = (pvr >> 8) & 0x0F;
			min = PVR_MIN(pvr);
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = PVR_MIN(pvr);
			break;
		}
	}
314 seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
315 maj, min, PVR_VER(pvr), PVR_REV(pvr));
317 if (IS_ENABLED(CONFIG_PPC32))
318 seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
319 (loops_per_jiffy / (5000 / HZ)) % 100);
323 /* If this is the last cpu, print the summary */
324 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
325 show_cpuinfo_summary(m);
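/*
 * seq_file iterator for /proc/cpuinfo. The iterator value is the cpu
 * number plus one, so that a valid cpu 0 is distinguishable from the
 * NULL that ends the sequence; show_cpuinfo() undoes the offset.
 */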
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = cpumask_first(cpu_online_mask);
	else
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return (void *)(unsigned long)(*pos + 1);

	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	DBG(" -> check_for_initrd()  initrd_start=0x%lx  initrd_end=0x%lx\n",
	    initrd_start, initrd_end);

	/*
	 * If we were passed an initrd, set the ROOT_DEV properly if the
	 * values look sensible. If not, clear the initrd reference.
	 */
	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}
#ifdef CONFIG_SMP

int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
cpumask_t threads_core_mask __read_mostly;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_per_subcore);
EXPORT_SYMBOL_GPL(threads_shift);
EXPORT_SYMBOL_GPL(threads_core_mask);
static void __init cpu_init_thread_core_maps(int tpc)
{
	int i;

	threads_per_core = tpc;
	threads_per_subcore = tpc;
	cpumask_clear(&threads_core_mask);

	/* This implementation only supports a power-of-2 number of
	 * threads per core, for simplicity and performance.
	 */
	threads_shift = ilog2(tpc);
	BUG_ON(tpc != (1 << threads_shift));

	for (i = 0; i < tpc; i++)
		cpumask_set_cpu(i, &threads_core_mask);

	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
	       tpc, tpc > 1 ? "s" : "");
	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}
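/*
 * For example, tpc = 8 gives threads_shift = 3 and a mask covering CPUs
 * 0-7; a thread's core is then cpu >> threads_shift and its position
 * within the core is cpu & (threads_per_core - 1).
 */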
u32 *cpu_to_phys_id = NULL;
/**
 * smp_setup_cpu_maps - initialize the following cpu maps:
 *                      cpu_possible_mask
 *                      cpu_present_mask
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_mask as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
	struct device_node *dn;
	int cpu = 0;
	int nthreads = 1;

	DBG("smp_setup_cpu_maps()\n");

	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
					__alignof__(u32));
	if (!cpu_to_phys_id)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
	for_each_node_by_type(dn, "cpu") {
		const __be32 *intserv;
		__be32 cpu_be;
		int j, len;

		DBG("  * %pOF...\n", dn);

		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
					  &len);
		if (intserv) {
			DBG("    ibm,ppc-interrupt-server#s -> %lu threads\n",
			    (len / sizeof(int)));
		} else {
			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
			intserv = of_get_property(dn, "reg", &len);
			if (!intserv) {
				cpu_be = cpu_to_be32(cpu);
				/* XXX: what is this? uninitialized?? */
				intserv = &cpu_be;	/* assume logical == phys */
				len = 4;
			}
		}
		nthreads = len / sizeof(int);

		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
			bool avail;

			DBG("    thread %d -> cpu %d (hard id %d)\n",
			    j, cpu, be32_to_cpu(intserv[j]));

			avail = of_device_is_available(dn);
			if (!avail)
				avail = !of_property_match_string(dn,
						"enable-method", "spin-table");

			set_cpu_present(cpu, avail);
			set_cpu_possible(cpu, true);
			cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
			cpu++;
		}

		if (cpu >= nr_cpu_ids) {
			of_node_put(dn);
			break;
		}
	}
	/* If no SMT supported, nthreads is forced to 1 */
	if (!cpu_has_feature(CPU_FTR_SMT)) {
		DBG("  SMT disabled ! nthreads forced to 1\n");
		nthreads = 1;
	}
#ifdef CONFIG_PPC64
	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		const __be32 *ireg;

		num_addr_cell = of_n_addr_cells(dn);
		num_size_cell = of_n_size_cells(dn);

		ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);

		/* Scale maxcpus by nthreads for processors with SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= nthreads;

		if (maxcpus > nr_cpu_ids) {
			printk(KERN_WARNING "Partition configured for %d cpus, "
			       "operating system maximum is %u.\n",
			       maxcpus, nr_cpu_ids);
			maxcpus = nr_cpu_ids;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			set_cpu_possible(cpu, true);
out:
		of_node_put(dn);
	}
	vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
	/*
	 * Initialize CPU <=> thread mapping.
	 *
	 * WARNING: We assume that the number of threads is the same for
	 * every CPU in the system. If that is not the case, then some code
	 * here will have to be reworked.
	 */
	cpu_init_thread_core_maps(nthreads);

	/* Now that possible cpus are set, set nr_cpu_ids for later use */
	setup_nr_cpu_ids();
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PCSPKR_PLATFORM
static __init int add_pcspkr(void)
{
	struct device_node *np;
	struct platform_device *pd;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
	of_node_put(np);	/* we only care whether the node exists */
	if (!np)
		return -ENODEV;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif /* CONFIG_PCSPKR_PLATFORM */
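/*
 * Buffer for a one-line hardware description. Early boot code appends
 * entries (CPU, firmware, machine name) and probe_machine() publishes
 * the result via dump_stack_set_arch_desc() for use in oops headers.
 */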
static char ppc_hw_desc_buf[128] __initdata;

struct seq_buf ppc_hw_desc __initdata = {
	.buffer = ppc_hw_desc_buf,
	.size = sizeof(ppc_hw_desc_buf),
	.len = 0,
};
static __init void probe_machine(void)
{
	extern struct machdep_calls __machine_desc_start;
	extern struct machdep_calls __machine_desc_end;
	unsigned int i;

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type.
	 */
	DBG("Probing machine type ...\n");

	/*
	 * Check ppc_md is empty; if not we have a bug, i.e. we have set up
	 * an entry before probe_machine() which will be overwritten.
	 */
	for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
		if (((void **)&ppc_md)[i]) {
			printk(KERN_ERR "Entry %d in ppc_md non empty before"
			       " machine probe !\n", i);
		}
	}

	for (machine_id = &__machine_desc_start;
	     machine_id < &__machine_desc_end;
	     machine_id++) {
		DBG("  %s ...\n", machine_id->name);
		if (machine_id->compatible && !of_machine_is_compatible(machine_id->compatible))
			continue;
		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
		if (ppc_md.probe && !ppc_md.probe())
			continue;
		DBG("   %s match !\n", machine_id->name);
		break;
	}

	/* What can we do if we didn't find anything? */
	if (machine_id >= &__machine_desc_end) {
		pr_err("No suitable machine description found !\n");
		for (;;)
			;
	}

	// Append the machine name to other info we've gathered
	seq_buf_puts(&ppc_hw_desc, ppc_md.name);

	// Set the generic hardware description shown in oopses
	dump_stack_set_arch_desc(ppc_hw_desc.buffer);

	pr_info("Hardware name: %s\n", ppc_hw_desc.buffer);
}
/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	switch (base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);

			of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
			if (!of_i8042_kbd_irq)
				of_i8042_kbd_irq = 1;

			of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
			if (!of_i8042_aux_irq)
				of_i8042_aux_irq = 12;

			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/* Pegasos has no device_type on its 8042 node, look for the
		 * name instead */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		if (np) {
			of_i8042_kbd_irq = 1;
			of_i8042_aux_irq = 12;
		}
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	parent = of_get_parent(np);
	if (parent) {
		if (of_node_is_type(parent, "isa"))
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);
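/*
 * Callers probing legacy devices (e.g. i8042 keyboard controller code)
 * use check_legacy_ioport() to confirm that a legacy port really sits
 * behind an ISA bridge before touching it; a return of 0 means a
 * matching device node with an "isa" parent was found.
 */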
/*
 * Panic notifiers setup
 *
 * We have 3 notifiers for powerpc, each one from a different "nature":
 *
 * - ppc_panic_fadump_handler() is a hypervisor notifier, which hard-disables
 *   IRQs and deals with the Firmware-Assisted dump, when it is configured;
 *   should run early in the panic path.
 *
 * - dump_kernel_offset() is an informative notifier, just showing the KASLR
 *   offset if we have RANDOMIZE_BASE set.
 *
 * - ppc_panic_platform_handler() is a low-level handler that's registered
 *   only if the platform wishes to perform final actions in the panic path,
 *   hence it should run late and might not even return. Currently, only
 *   pseries and ps3 platforms register callbacks.
 */
static int ppc_panic_fadump_handler(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	/*
	 * panic does a local_irq_disable, but we really
	 * want interrupts to be hard disabled.
	 */
	hard_irq_disable();

	/*
	 * If firmware-assisted dump has been registered then trigger
	 * its callback and let the firmware handle everything else.
	 */
	crash_fadump(NULL, ptr);

	return NOTIFY_DONE;
}
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
			      void *p)
{
	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
		 kaslr_offset(), KERNELBASE);

	return NOTIFY_DONE;
}
static int ppc_panic_platform_handler(struct notifier_block *this,
				      unsigned long event, void *ptr)
{
	/*
	 * This handler is only registered if we have a panic callback
	 * on ppc_md, hence the NULL check is not needed.
	 * Also, it may not return, so it runs really late on the panic path.
	 */
	ppc_md.panic(ptr);

	return NOTIFY_DONE;
}
static struct notifier_block ppc_fadump_block = {
	.notifier_call = ppc_panic_fadump_handler,
	.priority = INT_MAX, /* run early, to notify the firmware ASAP */
};

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset,
};

static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_platform_handler,
	.priority = INT_MIN, /* may not return; must be done last */
};
void __init setup_panic(void)
{
	/* Hard-disable IRQs and deal with the FW-assisted dump (fadump) */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &ppc_fadump_block);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kernel_offset_notifier);

	/* Low-level platform-specific routines that should run on panic */
	if (ppc_md.panic)
		atomic_notifier_chain_register(&panic_notifier_list,
					       &ppc_panic_block);
}
#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency. This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree. Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#define KERNEL_COHERENCY	(!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))
static int __init check_cache_coherency(void)
{
	struct device_node *np;
	const void *prop;
	bool devtree_coherency;

	np = of_find_node_by_path("/");
	prop = of_get_property(np, "coherency-off", NULL);
	of_node_put(np);

	devtree_coherency = prop ? false : true;

	if (devtree_coherency != KERNEL_COHERENCY) {
		printk(KERN_ERR
			"kernel coherency:%s != device tree coherency:%s\n",
			KERNEL_COHERENCY ? "on" : "off",
			devtree_coherency ? "on" : "off");
		BUG();
	}

	return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */
void ppc_printk_progress(char *s, unsigned short hex)
{
	pr_info("%s\n", s);
}
static __init void print_system_info(void)
{
	pr_info("-----------------------------------------------------\n");
	pr_info("phys_mem_size     = 0x%llx\n",
		(unsigned long long)memblock_phys_mem_size());

	pr_info("dcache_bsize      = 0x%x\n", dcache_bsize);
	pr_info("icache_bsize      = 0x%x\n", icache_bsize);

	pr_info("cpu_features      = 0x%016lx\n", cur_cpu_spec->cpu_features);
	pr_info("  possible        = 0x%016lx\n",
		(unsigned long)CPU_FTRS_POSSIBLE);
	pr_info("  always          = 0x%016lx\n",
		(unsigned long)CPU_FTRS_ALWAYS);
	pr_info("cpu_user_features = 0x%08x 0x%08x\n",
		cur_cpu_spec->cpu_user_features,
		cur_cpu_spec->cpu_user_features2);
	pr_info("mmu_features      = 0x%08x\n", cur_cpu_spec->mmu_features);
#ifdef CONFIG_PPC64
	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
#ifdef CONFIG_PPC_BOOK3S
	pr_info("vmalloc start     = 0x%lx\n", KERN_VIRT_START);
	pr_info("IO start          = 0x%lx\n", KERN_IO_START);
	pr_info("vmemmap start     = 0x%lx\n", (unsigned long)vmemmap);
#endif
#endif

	if (!early_radix_enabled())
		print_system_hash_info();

	if (PHYSICAL_START > 0)
		pr_info("physical_start    = 0x%llx\n",
			(unsigned long long)PHYSICAL_START);

	pr_info("-----------------------------------------------------\n");
}
#ifdef CONFIG_SMP
static void __init smp_setup_pacas(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		allocate_paca(cpu);
		set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
	}

	memblock_free(cpu_to_phys_id, nr_cpu_ids * sizeof(u32));
	cpu_to_phys_id = NULL;
}
#endif
/*
 * Called from start_kernel(), this initializes memblock, which is used
 * to manage page allocation until mem_init() is called.
 */
void __init setup_arch(char **cmdline_p)
{
	kasan_init();

	*cmdline_p = boot_command_line;

	/* Set a half-reasonable default so udelay does something sensible */
	loops_per_jiffy = 500000000 / HZ;

	/* Unflatten the device-tree passed by prom_init or kexec */
	unflatten_device_tree();
	/*
	 * Initialize cache line/block info from device-tree (on ppc64) or
	 * just cputable (on ppc32).
	 */
	initialize_cache_info();

	/* Initialize RTAS if available. */
	rtas_initialize();

	/* Check if we have an initrd provided via the device-tree. */
	check_for_initrd();

	/* Probe the machine type, establish ppc_md. */
	probe_machine();

	/* Setup panic notifier if requested by the platform. */
	setup_panic();

	/*
	 * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
	 * it from their respective probe() function).
	 */
	setup_power_save();

	/* Discover standard serial ports. */
	find_legacy_serial_ports();

	/* Register early console with the printk subsystem. */
	register_early_udbg_console();

	/* Setup the various CPU maps based on the device-tree. */
	smp_setup_cpu_maps();
	/* Initialize xmon. */
	xmon_setup();

	/* Check the SMT related command line arguments (ppc64). */
	check_smt_enabled();

	/* Parse memory topology */
	mem_topology_setup();
	/* Set max_mapnr before paging_init() */
	set_max_mapnr(max_pfn);
	/*
	 * Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids.
	 *
	 * Freescale Book3e parts spin in a loop provided by firmware,
	 * so smp_release_cpus() does nothing for them.
	 */
#ifdef CONFIG_SMP
	smp_setup_pacas();

	/* On BookE, setup per-core TLB data structures. */
	setup_tlb_core_data();
#endif
	/* Print various info about the machine that has been gathered so far. */
	print_system_info();

	klp_init_thread_info(&init_task);

	setup_initial_init_mm(_stext, _etext, _edata, _end);
	/* sched_init() does the mmgrab(&init_mm) for the primary CPU */
	VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(&init_mm));
	inc_mm_active_cpus(&init_mm);
	mm_iommu_init(&init_mm);

	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

	mce_init();
	smp_release_cpus();

	initmem_init();
	/*
	 * Reserve large chunks of memory for use by CMA for KVM and hugetlb.
	 * These must be called after initmem_init(), so that pageblock_order
	 * is initialised.
	 */
	kvm_cma_reserve();
	gigantic_hugetlb_cma_reserve();

	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	setup_barrier_nospec();
	setup_spectre_v2();

	paging_init();
	/* Initialize the MMU context management stuff. */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned. */
	if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);
}