GNU Linux-libre 4.4.283-gnu1
[releases.git] arch/mips/kernel/setup.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1995 Linus Torvalds
7  * Copyright (C) 1995 Waldorf Electronics
8  * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
9  * Copyright (C) 1996 Stoned Elipot
10  * Copyright (C) 1999 Silicon Graphics, Inc.
11  * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
12  */
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/export.h>
16 #include <linux/screen_info.h>
17 #include <linux/memblock.h>
18 #include <linux/bootmem.h>
19 #include <linux/initrd.h>
20 #include <linux/root_dev.h>
21 #include <linux/highmem.h>
22 #include <linux/console.h>
23 #include <linux/pfn.h>
24 #include <linux/debugfs.h>
25 #include <linux/kexec.h>
26 #include <linux/sizes.h>
27 #include <linux/device.h>
28 #include <linux/dma-contiguous.h>
29
30 #include <asm/addrspace.h>
31 #include <asm/bootinfo.h>
32 #include <asm/bugs.h>
33 #include <asm/cache.h>
34 #include <asm/cdmm.h>
35 #include <asm/cpu.h>
36 #include <asm/debug.h>
37 #include <asm/sections.h>
38 #include <asm/setup.h>
39 #include <asm/smp-ops.h>
40 #include <asm/prom.h>
41
42 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB
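/*
 * Space reserved in the kernel image so that a device tree blob can be
 * appended or patched in after the build (CONFIG_MIPS_ELF_APPENDED_DTB).
 */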
43 const char __section(.appended_dtb) __appended_dtb[0x100000];
44 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
45
46 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
47
48 EXPORT_SYMBOL(cpu_data);
49
50 #ifdef CONFIG_VT
51 struct screen_info screen_info;
52 #endif
53
54 /*
55  * Despite its name this variable is used even if we don't have PCI.
56  */
57 unsigned int PCI_DMA_BUS_IS_PHYS;
58
59 EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
60
61 /*
62  * Setup information
63  *
64  * These are initialized so they are in the .data section
65  */
66 unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
67
68 EXPORT_SYMBOL(mips_machtype);
69
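/*
 * Boot-time physical memory map, filled in by platform code and by the
 * "mem=" command line option via add_memory_region().
 */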
70 struct boot_mem_map boot_mem_map;
71
72 static char __initdata command_line[COMMAND_LINE_SIZE];
73 char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
74
75 #ifdef CONFIG_CMDLINE_BOOL
76 static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
77 #endif
78
79 /*
80  * mips_io_port_base is the beginning of the address space to which x86 style
81  * I/O ports are mapped.
82  */
83 unsigned long mips_io_port_base = -1;
84 EXPORT_SYMBOL(mips_io_port_base);
85
86 static struct resource code_resource = { .name = "Kernel code", };
87 static struct resource data_resource = { .name = "Kernel data", };
88
89 static void *detect_magic __initdata = detect_memory_region;
90
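/*
 * Record a physical memory range in boot_mem_map.  Overlapping or adjacent
 * ranges of the same type are merged into a single entry.
 */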
91 void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
92 {
93         int x = boot_mem_map.nr_map;
94         int i;
95
96         /* Sanity check */
97         if (start + size < start) {
98                 pr_warn("Trying to add an invalid memory region, skipped\n");
99                 return;
100         }
101
102         /*
103          * Try to merge with existing entry, if any.
104          */
105         for (i = 0; i < boot_mem_map.nr_map; i++) {
106                 struct boot_mem_map_entry *entry = boot_mem_map.map + i;
107                 unsigned long top;
108
109                 if (entry->type != type)
110                         continue;
111
112                 if (start + size < entry->addr)
113                         continue;                       /* no overlap */
114
115                 if (entry->addr + entry->size < start)
116                         continue;                       /* no overlap */
117
118                 top = max(entry->addr + entry->size, start + size);
119                 entry->addr = min(entry->addr, start);
120                 entry->size = top - entry->addr;
121
122                 return;
123         }
124
125         if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
126                 pr_err("Ooops! Too many entries in the memory map!\n");
127                 return;
128         }
129
130         boot_mem_map.map[x].addr = start;
131         boot_mem_map.map[x].size = size;
132         boot_mem_map.map[x].type = type;
133         boot_mem_map.nr_map++;
134 }
135
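/*
 * Determine the actual amount of RAM by probing for address wrap-around:
 * detect_magic holds a distinctive value, so once the probed size exceeds
 * the fitted RAM, the alias at detect_magic + size compares equal and the
 * search stops.
 */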
136 void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
137 {
138         void *dm = &detect_magic;
139         phys_addr_t size;
140
141         for (size = sz_min; size < sz_max; size <<= 1) {
142                 if (!memcmp(dm, dm + size, sizeof(detect_magic)))
143                         break;
144         }
145
146         pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
147                 ((unsigned long long) size) / SZ_1M,
148                 (unsigned long long) start,
149                 ((unsigned long long) sz_min) / SZ_1M,
150                 ((unsigned long long) sz_max) / SZ_1M);
151
152         add_memory_region(start, size, BOOT_MEM_RAM);
153 }
154
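/*
 * Return true if [start, start + size) lies entirely within a BOOT_MEM_RAM
 * entry of boot_mem_map and does not intersect any BOOT_MEM_RESERVED entry.
 */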
155 bool __init memory_region_available(phys_addr_t start, phys_addr_t size)
156 {
157         int i;
158         bool in_ram = false, free = true;
159
160         for (i = 0; i < boot_mem_map.nr_map; i++) {
161                 phys_addr_t start_, end_;
162
163                 start_ = boot_mem_map.map[i].addr;
164                 end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;
165
166                 switch (boot_mem_map.map[i].type) {
167                 case BOOT_MEM_RAM:
168                         if (start >= start_ && start + size <= end_)
169                                 in_ram = true;
170                         break;
171                 case BOOT_MEM_RESERVED:
172                         if ((start >= start_ && start < end_) ||
173                             (start < start_ && start + size >= start_))
174                                 free = false;
175                         break;
176                 default:
177                         continue;
178                 }
179         }
180
181         return in_ram && free;
182 }
183
184 static void __init print_memory_map(void)
185 {
186         int i;
187         const int field = 2 * sizeof(unsigned long);
188
189         for (i = 0; i < boot_mem_map.nr_map; i++) {
190                 printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
191                        field, (unsigned long long) boot_mem_map.map[i].size,
192                        field, (unsigned long long) boot_mem_map.map[i].addr);
193
194                 switch (boot_mem_map.map[i].type) {
195                 case BOOT_MEM_RAM:
196                         printk(KERN_CONT "(usable)\n");
197                         break;
198                 case BOOT_MEM_INIT_RAM:
199                         printk(KERN_CONT "(usable after init)\n");
200                         break;
201                 case BOOT_MEM_ROM_DATA:
202                         printk(KERN_CONT "(ROM data)\n");
203                         break;
204                 case BOOT_MEM_RESERVED:
205                         printk(KERN_CONT "(reserved)\n");
206                         break;
207                 default:
208                         printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
209                         break;
210                 }
211         }
212 }
213
214 /*
215  * Manage initrd
216  */
217 #ifdef CONFIG_BLK_DEV_INITRD
218
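/*
 * "rd_start=" and "rd_size=" describe an initrd that the bootloader has
 * already loaded into memory.
 */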
219 static int __init rd_start_early(char *p)
220 {
221         unsigned long start = memparse(p, &p);
222
223 #ifdef CONFIG_64BIT
224         /* Guess whether the bootloader forgot the sign extension */
225         if (start < XKPHYS)
226                 start = (int)start;
227 #endif
228         initrd_start = start;
229         initrd_end += start;
230         return 0;
231 }
232 early_param("rd_start", rd_start_early);
233
234 static int __init rd_size_early(char *p)
235 {
236         initrd_end += memparse(p, &p);
237         return 0;
238 }
239 early_param("rd_size", rd_size_early);
240
241 /* Returns the next free PFN after the initrd */
242 static unsigned long __init init_initrd(void)
243 {
244         unsigned long end;
245
246         /*
247          * Board-specific code or the command line parser should have
248          * already set up initrd_start and initrd_end. In these cases
249          * perform sanity checks and use them if all looks good.
250          */
251         if (!initrd_start || initrd_end <= initrd_start)
252                 goto disable;
253
254         if (initrd_start & ~PAGE_MASK) {
255                 pr_err("initrd start must be page aligned\n");
256                 goto disable;
257         }
258         if (initrd_start < PAGE_OFFSET) {
259                 pr_err("initrd start < PAGE_OFFSET\n");
260                 goto disable;
261         }
262
263         /*
264          * Sanitize the initrd addresses. For example, firmware can't
265          * guess whether it needs to pass them as 64-bit values when the
266          * kernel has been built as a pure 32-bit kernel. We also need
267          * to switch from KSEG0 to XKPHYS addresses now, so the code
268          * can safely use __pa() from here on.
269          */
270         end = __pa(initrd_end);
271         initrd_end = (unsigned long)__va(end);
272         initrd_start = (unsigned long)__va(__pa(initrd_start));
273
274         ROOT_DEV = Root_RAM0;
275         return PFN_UP(end);
276 disable:
277         initrd_start = 0;
278         initrd_end = 0;
279         return 0;
280 }
281
282 static void __init finalize_initrd(void)
283 {
284         unsigned long size = initrd_end - initrd_start;
285
286         if (size == 0) {
287                 printk(KERN_INFO "Initrd not found or empty");
288                 goto disable;
289         }
290         if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
291                 printk(KERN_ERR "Initrd extends beyond end of memory");
292                 goto disable;
293         }
294
295         reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
296         initrd_below_start_ok = 1;
297
298         pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
299                 initrd_start, size);
300         return;
301 disable:
302         printk(KERN_CONT " - disabling initrd\n");
303         initrd_start = 0;
304         initrd_end = 0;
305 }
306
307 #else  /* !CONFIG_BLK_DEV_INITRD */
308
309 static unsigned long __init init_initrd(void)
310 {
311         return 0;
312 }
313
314 #define finalize_initrd()       do {} while (0)
315
316 #endif
317
318 /*
319  * Initialize the bootmem allocator. It also sets up initrd-related data
320  * if needed.
321  */
322 #if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
323
324 static void __init bootmem_init(void)
325 {
326         init_initrd();
327         finalize_initrd();
328 }
329
330 #else  /* !CONFIG_SGI_IP27 && !(CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */
331
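/* Bytes needed for the bootmem bitmap: one bit per page, rounded up to a whole long. */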
332 static unsigned long __init bootmap_bytes(unsigned long pages)
333 {
334         unsigned long bytes = DIV_ROUND_UP(pages, 8);
335
336         return ALIGN(bytes, sizeof(long));
337 }
338
339 static void __init bootmem_init(void)
340 {
341         unsigned long reserved_end;
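        /* PFN where the bootmem bitmap will go (above the kernel image and initrd) */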
342         unsigned long mapstart = ~0UL;
343         unsigned long bootmap_size;
344         bool bootmap_valid = false;
345         int i;
346
347         /*
348          * Sanity check any INITRD first. We don't take it into account
349          * for bootmem setup initially; we rely on the end of the kernel
350          * code as our memory range starting point. Once bootmem is initialized we
351          * will reserve the area used for the initrd.
352          */
353         init_initrd();
354         reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
355
356         /*
357          * max_low_pfn is not a number of pages. The number of pages
358          * of the system is given by 'max_low_pfn - min_low_pfn'.
359          */
360         min_low_pfn = ~0UL;
361         max_low_pfn = 0;
362
363         /*
364          * Find the highest page frame number we have available.
365          */
366         for (i = 0; i < boot_mem_map.nr_map; i++) {
367                 unsigned long start, end;
368
369                 if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
370                         continue;
371
372                 start = PFN_UP(boot_mem_map.map[i].addr);
373                 end = PFN_DOWN(boot_mem_map.map[i].addr
374                                 + boot_mem_map.map[i].size);
375
376                 if (end > max_low_pfn)
377                         max_low_pfn = end;
378                 if (start < min_low_pfn)
379                         min_low_pfn = start;
380                 if (end <= reserved_end)
381                         continue;
382 #ifdef CONFIG_BLK_DEV_INITRD
383                 /* Skip zones before initrd and initrd itself */
384                 if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
385                         continue;
386 #endif
387                 if (start >= mapstart)
388                         continue;
389                 mapstart = max(reserved_end, start);
390         }
391
392         if (min_low_pfn >= max_low_pfn)
393                 panic("Incorrect memory mapping !!!");
394         if (min_low_pfn > ARCH_PFN_OFFSET) {
395                 pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
396                         (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
397                         min_low_pfn - ARCH_PFN_OFFSET);
398         } else if (min_low_pfn < ARCH_PFN_OFFSET) {
399                 pr_info("%lu free pages won't be used\n",
400                         ARCH_PFN_OFFSET - min_low_pfn);
401         }
402         min_low_pfn = ARCH_PFN_OFFSET;
403
404         /*
405          * Determine low and high memory ranges
406          */
407         max_pfn = max_low_pfn;
408         if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
409 #ifdef CONFIG_HIGHMEM
410                 highstart_pfn = PFN_DOWN(HIGHMEM_START);
411                 highend_pfn = max_low_pfn;
412 #endif
413                 max_low_pfn = PFN_DOWN(HIGHMEM_START);
414         }
415
416 #ifdef CONFIG_BLK_DEV_INITRD
417         /*
418          * mapstart should be after initrd_end
419          */
420         if (initrd_end)
421                 mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
422 #endif
423
424         /*
425          * Check that mapstart doesn't overlap with any of the
426          * memory regions that have been reserved through e.g. the DTB.
427          */
428         bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);
429
430         bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
431                                                 bootmap_size);
432         for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
433                 unsigned long mapstart_addr;
434
435                 switch (boot_mem_map.map[i].type) {
436                 case BOOT_MEM_RESERVED:
437                         mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
438                                                 boot_mem_map.map[i].size);
439                         if (PHYS_PFN(mapstart_addr) < mapstart)
440                                 break;
441
442                         bootmap_valid = memory_region_available(mapstart_addr,
443                                                                 bootmap_size);
444                         if (bootmap_valid)
445                                 mapstart = PHYS_PFN(mapstart_addr);
446                         break;
447                 default:
448                         break;
449                 }
450         }
451
452         if (!bootmap_valid)
453                 panic("No memory area to place a bootmap bitmap");
454
455         /*
456          * Initialize the boot-time allocator with low memory only.
457          */
458         if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
459                                          min_low_pfn, max_low_pfn))
460                 panic("Unexpected memory size required for bootmap");
461
462         for (i = 0; i < boot_mem_map.nr_map; i++) {
463                 unsigned long start, end;
464
465                 start = PFN_UP(boot_mem_map.map[i].addr);
466                 end = PFN_DOWN(boot_mem_map.map[i].addr
467                                 + boot_mem_map.map[i].size);
468
469                 if (start <= min_low_pfn)
470                         start = min_low_pfn;
471                 if (start >= end)
472                         continue;
473
474 #ifndef CONFIG_HIGHMEM
475                 if (end > max_low_pfn)
476                         end = max_low_pfn;
477
478                 /*
479                  * ... finally, is the area going away?
480                  */
481                 if (end <= start)
482                         continue;
483 #endif
484
485                 memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
486         }
487
488         /*
489          * Register fully available low RAM pages with the bootmem allocator.
490          */
491         for (i = 0; i < boot_mem_map.nr_map; i++) {
492                 unsigned long start, end, size;
493
494                 start = PFN_UP(boot_mem_map.map[i].addr);
495                 end   = PFN_DOWN(boot_mem_map.map[i].addr
496                                     + boot_mem_map.map[i].size);
497
498                 /*
499                  * Decide what to do with this region; only plain RAM is freed below.
500                  */
501                 switch (boot_mem_map.map[i].type) {
502                 case BOOT_MEM_RAM:
503                         break;
504                 case BOOT_MEM_INIT_RAM:
505                         memory_present(0, start, end);
506                         continue;
507                 default:
508                         /* Not usable memory */
509                         if (start > min_low_pfn && end < max_low_pfn)
510                                 reserve_bootmem(boot_mem_map.map[i].addr,
511                                                 boot_mem_map.map[i].size,
512                                                 BOOTMEM_DEFAULT);
513                         continue;
514                 }
515
516                 /*
517                  * We round the start address of usable memory up and the
518                  * end of the usable range down.
519                  */
520                 if (start >= max_low_pfn)
521                         continue;
522                 if (start < reserved_end)
523                         start = reserved_end;
524                 if (end > max_low_pfn)
525                         end = max_low_pfn;
526
527                 /*
528                  * ... finally, is the area going away?
529                  */
530                 if (end <= start)
531                         continue;
532                 size = end - start;
533
534                 /* Register lowmem ranges */
535                 free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
536                 memory_present(0, start, end);
537         }
538
539         /*
540          * Reserve the bootmap memory.
541          */
542         reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
543
544         /*
545          * Reserve initrd memory if needed.
546          */
547         finalize_initrd();
548 }
549
550 #endif  /* CONFIG_SGI_IP27 || (CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */
551
552 /*
553  * arch_mem_init - initialize memory management subsystem
554  *
555  *  o plat_mem_setup() detects the memory configuration and will record detected
556  *    memory areas using add_memory_region.
557  *
558  * At this stage the memory configuration of the system is known to the
559  * kernel, but the generic memory management system is still entirely uninitialized.
560  *
561  *  o bootmem_init()
562  *  o sparse_init()
563  *  o paging_init()
564  *  o dma_contiguous_reserve()
565  *
566  * At this stage the bootmem allocator is ready to use.
567  *
568  * NOTE: historically plat_mem_setup did the entire platform initialization.
569  *       This was rather impractical because it meant plat_mem_setup had to
570  * get by without any kind of memory allocator.  To keep old code from
571  * breaking, plat_setup was simply renamed to plat_mem_setup and a second
572  * platform initialization hook for anything else was introduced.
573  */
574
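/* Non-zero once the user has overridden the memory map with "mem=" on the command line. */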
575 static int usermem __initdata;
576
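/*
 * "mem=<size>[@<start>]" replaces the firmware-provided memory map; for
 * example (hypothetical values) "mem=64M@0x20000000" registers a single
 * 64 MB region at physical address 0x20000000.  The first "mem=" option
 * clears the map built so far.
 */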
577 static int __init early_parse_mem(char *p)
578 {
579         phys_addr_t start, size;
580
581         /*
582          * If a user specifies memory size, we
583          * blow away any automatically generated
584          * size.
585          */
586         if (usermem == 0) {
587                 boot_mem_map.nr_map = 0;
588                 usermem = 1;
589         }
590         start = 0;
591         size = memparse(p, &p);
592         if (*p == '@')
593                 start = memparse(p + 1, &p);
594
595         add_memory_region(start, size, BOOT_MEM_RAM);
596         return 0;
597 }
598 early_param("mem", early_parse_mem);
599
600 #ifdef CONFIG_PROC_VMCORE
601 unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
602 static int __init early_parse_elfcorehdr(char *p)
603 {
604         int i;
605
606         setup_elfcorehdr = memparse(p, &p);
607
608         for (i = 0; i < boot_mem_map.nr_map; i++) {
609                 unsigned long start = boot_mem_map.map[i].addr;
610                 unsigned long end = (boot_mem_map.map[i].addr +
611                                      boot_mem_map.map[i].size);
612                 if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
613                         /*
614                          * Reserve from the ELF core header to the end of
615                          * the memory segment; that should all be kdump
616                          * reserved memory.
617                          */
618                         setup_elfcorehdr_size = end - setup_elfcorehdr;
619                         break;
620                 }
621         }
622         /*
623          * If we don't find it in the memory map, then we shouldn't
624          * have to worry about it, as the new kernel won't use it.
625          */
626         return 0;
627 }
628 early_param("elfcorehdr", early_parse_elfcorehdr);
629 #endif
630
631 static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
632 {
633         phys_addr_t size;
634         int i;
635
636         size = end - mem;
637         if (!size)
638                 return;
639
640         /* Make sure it is in the boot_mem_map */
641         for (i = 0; i < boot_mem_map.nr_map; i++) {
642                 if (mem >= boot_mem_map.map[i].addr &&
643                     mem < (boot_mem_map.map[i].addr +
644                            boot_mem_map.map[i].size))
645                         return;
646         }
647         add_memory_region(mem, size, type);
648 }
649
650 #ifdef CONFIG_KEXEC
651 static inline unsigned long long get_total_mem(void)
652 {
653         unsigned long long total;
654
655         total = max_pfn - min_low_pfn;
656         return total << PAGE_SHIFT;
657 }
658
659 static void __init mips_parse_crashkernel(void)
660 {
661         unsigned long long total_mem;
662         unsigned long long crash_size, crash_base;
663         int ret;
664
665         total_mem = get_total_mem();
666         ret = parse_crashkernel(boot_command_line, total_mem,
667                                 &crash_size, &crash_base);
668         if (ret != 0 || crash_size <= 0)
669                 return;
670
671         crashk_res.start = crash_base;
672         crashk_res.end   = crash_base + crash_size - 1;
673 }
674
675 static void __init request_crashkernel(struct resource *res)
676 {
677         int ret;
678
679         ret = request_resource(res, &crashk_res);
680         if (!ret)
681                 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
682                         (unsigned long)((crashk_res.end -
683                                          crashk_res.start + 1) >> 20),
684                         (unsigned long)(crashk_res.start  >> 20));
685 }
686 #else /* !defined(CONFIG_KEXEC)         */
687 static void __init mips_parse_crashkernel(void)
688 {
689 }
690
691 static void __init request_crashkernel(struct resource *res)
692 {
693 }
694 #endif /* !defined(CONFIG_KEXEC)  */
695
696 #define USE_PROM_CMDLINE        IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
697 #define USE_DTB_CMDLINE         IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
698 #define EXTEND_WITH_PROM        IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
699
700 static void __init arch_mem_init(char **cmdline_p)
701 {
702         struct memblock_region *reg;
703         extern void plat_mem_setup(void);
704
705         /* call board setup routine */
706         plat_mem_setup();
707
708         /*
709          * Make sure all kernel memory is in the maps.  The "UP" and
710          * "DOWN" are opposite for initdata since if it crosses over
711          * into another memory section you don't want that to be
712          * freed when the initdata is freed.
713          */
714         arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
715                          PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
716                          BOOT_MEM_RAM);
717         arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
718                          PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
719                          BOOT_MEM_INIT_RAM);
720
721         pr_info("Determined physical RAM map:\n");
722         print_memory_map();
723
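/*
 * Assemble the kernel command line.  With CONFIG_CMDLINE_OVERRIDE the
 * built-in command line replaces everything else; otherwise the bootloader
 * (arcs_cmdline) and/or DTB command lines are chosen according to the
 * CONFIG_MIPS_CMDLINE_* options, and the built-in command line, if any, is
 * appended.
 */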
724 #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
725         strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
726 #else
727         if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
728             (USE_DTB_CMDLINE && !boot_command_line[0]))
729                 strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
730
731         if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
732                 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
733                 strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
734         }
735
736 #if defined(CONFIG_CMDLINE_BOOL)
737         if (builtin_cmdline[0]) {
738                 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
739                 strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
740         }
741 #endif
742 #endif
743         strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
744
745         *cmdline_p = command_line;
746
747         parse_early_param();
748
749         if (usermem) {
750                 pr_info("User-defined physical RAM map:\n");
751                 print_memory_map();
752         }
753
754         bootmem_init();
755 #ifdef CONFIG_PROC_VMCORE
756         if (setup_elfcorehdr && setup_elfcorehdr_size) {
757                 printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
758                        setup_elfcorehdr, setup_elfcorehdr_size);
759                 reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
760                                 BOOTMEM_DEFAULT);
761         }
762 #endif
763
764         mips_parse_crashkernel();
765 #ifdef CONFIG_KEXEC
766         if (crashk_res.start != crashk_res.end)
767                 reserve_bootmem(crashk_res.start,
768                                 crashk_res.end - crashk_res.start + 1,
769                                 BOOTMEM_DEFAULT);
770 #endif
771         device_tree_init();
772
773         /*
774          * To reduce the possibility of a kernel panic when we fail to
775          * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
776          * as little low memory as possible before plat_swiotlb_setup(), so
777          * make sparse_init() use top-down allocation.
778          */
779         memblock_set_bottom_up(false);
780         sparse_init();
781         memblock_set_bottom_up(true);
782
783         plat_swiotlb_setup();
784         paging_init();
785
786         dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
787         /* Tell bootmem about cma reserved memblock section */
788         for_each_memblock(reserved, reg)
789                 if (reg->size != 0)
790                         reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
791
792         reserve_bootmem_region(__pa_symbol(&__nosave_begin),
793                         __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
794 }
795
796 static void __init resource_init(void)
797 {
798         int i;
799
800         if (UNCAC_BASE != IO_BASE)
801                 return;
802
803         code_resource.start = __pa_symbol(&_text);
804         code_resource.end = __pa_symbol(&_etext) - 1;
805         data_resource.start = __pa_symbol(&_etext);
806         data_resource.end = __pa_symbol(&_edata) - 1;
807
808         for (i = 0; i < boot_mem_map.nr_map; i++) {
809                 struct resource *res;
810                 unsigned long start, end;
811
812                 start = boot_mem_map.map[i].addr;
813                 end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
814                 if (start >= HIGHMEM_START)
815                         continue;
816                 if (end >= HIGHMEM_START)
817                         end = HIGHMEM_START - 1;
818
819                 res = alloc_bootmem(sizeof(struct resource));
820                 switch (boot_mem_map.map[i].type) {
821                 case BOOT_MEM_RAM:
822                 case BOOT_MEM_INIT_RAM:
823                 case BOOT_MEM_ROM_DATA:
824                         res->name = "System RAM";
825                         break;
826                 case BOOT_MEM_RESERVED:
827                 default:
828                         res->name = "reserved";
829                 }
830
831                 res->start = start;
832                 res->end = end;
833
834                 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
835                 request_resource(&iomem_resource, res);
836
837                 /*
838                  *  We don't know which RAM region contains kernel data,
839                  *  so we try each region in turn and let the resource
840                  *  manager test it.
841                  */
842                 request_resource(res, &code_resource);
843                 request_resource(res, &data_resource);
844                 request_crashkernel(res);
845         }
846 }
847
848 #ifdef CONFIG_SMP
849 static void __init prefill_possible_map(void)
850 {
851         int i, possible = num_possible_cpus();
852
853         if (possible > nr_cpu_ids)
854                 possible = nr_cpu_ids;
855
856         for (i = 0; i < possible; i++)
857                 set_cpu_possible(i, true);
858         for (; i < NR_CPUS; i++)
859                 set_cpu_possible(i, false);
860
861         nr_cpu_ids = possible;
862 }
863 #else
864 static inline void prefill_possible_map(void) {}
865 #endif
866
867 void __init setup_arch(char **cmdline_p)
868 {
869         cpu_probe();
870         prom_init();
871
872         setup_early_fdc_console();
873 #ifdef CONFIG_EARLY_PRINTK
874         setup_early_printk();
875 #endif
876         cpu_report();
877         check_bugs_early();
878
879 #if defined(CONFIG_VT)
880 #if defined(CONFIG_VGA_CONSOLE)
881         conswitchp = &vga_con;
882 #elif defined(CONFIG_DUMMY_CONSOLE)
883         conswitchp = &dummy_con;
884 #endif
885 #endif
886
887         arch_mem_init(cmdline_p);
888
889         resource_init();
890         plat_smp_setup();
891         prefill_possible_map();
892
893         cpu_cache_init();
894 }
895
896 unsigned long kernelsp[NR_CPUS];
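/* Arguments passed to the kernel by the bootloader in registers a0..a3. */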
897 unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
898
899 #ifdef CONFIG_DEBUG_FS
900 struct dentry *mips_debugfs_dir;
901 static int __init debugfs_mips(void)
902 {
903         struct dentry *d;
904
905         d = debugfs_create_dir("mips", NULL);
906         if (!d)
907                 return -ENOMEM;
908         mips_debugfs_dir = d;
909         return 0;
910 }
911 arch_initcall(debugfs_mips);
912 #endif