// SPDX-License-Identifier: GPL-2.0
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/reboot.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);

DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
EXPORT_SYMBOL(ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
EXPORT_SYMBOL(local_per_cpu_offset);

unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;
static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
unsigned long ia64_max_cacheline_size;

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
114 * "flush_icache_range()" needs to know what processor dependent stride size to use
115 * when it makes i-cache(s) coherent with d-caches.
117 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
118 unsigned long ia64_i_cache_stride_shift = ~0;
120 * "clflush_cache_range()" needs to know what processor dependent stride size to
121 * use when it flushes cache lines including both d-cache and i-cache.
123 /* Safest way to go: 32 bytes by 32 bytes */
124 #define CACHE_STRIDE_SHIFT 5
125 unsigned long ia64_cache_stride_shift = ~0;
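/*
 * Illustrative sketch (not part of the original file): a C-level view of how a
 * flush routine can walk a range using the stride chosen above.  The real
 * ia64 flush_icache_range() is written in assembly; this only shows the
 * intended stride arithmetic, assuming the usual ia64_fc()/ia64_sync_i()/
 * ia64_srlz_i() wrappers around the fc/fc.i, sync.i and srlz.i instructions.
 *
 *	void example_flush_icache_range(unsigned long start, unsigned long end)
 *	{
 *		unsigned long stride = 1UL << ia64_i_cache_stride_shift;
 *		unsigned long addr = start & ~(stride - 1);
 *
 *		for (; addr < end; addr += stride)
 *			ia64_fc((void *)addr);	// make this line coherent
 *		ia64_sync_i();
 *		ia64_srlz_i();
 *	}
 */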
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;
/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
filter_rsvd_memory (u64 start, u64 end, void *arg)
	u64 range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);

	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
	if (start >= end) return 0;

	/* lowest possible address (walker uses virtual) */
	prev_start = PAGE_OFFSET;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	/* end of memory marker allows full processing inside loop body */
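/*
 * Usage sketch (assumption, not shown in this excerpt): callers normally hand
 * this routine to the EFI memory-map walker together with a per-node callback
 * passed through 'arg', so only non-reserved RAM reaches the callback, e.g.:
 *
 *	efi_memmap_walk(filter_rsvd_memory, register_active_ranges);
 *
 * where register_active_ranges(start, len, nid) stands for whatever the mm
 * init code wants to do with each usable physical range.
 */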
/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
filter_memory(u64 start, u64 end, void *arg)
	void (*func)(unsigned long, unsigned long, int);

	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");

	call_pernode_memory(__pa(start), end - start, func);
sort_regions (struct rsvd_region *rsvd_region, int max)

	/* simple bubble sorting */
	for (j = 0; j < max; ++j) {
		if (rsvd_region[j].start > rsvd_region[j+1].start) {
			struct rsvd_region tmp;
			tmp = rsvd_region[j];
			rsvd_region[j] = rsvd_region[j + 1];
			rsvd_region[j + 1] = tmp;

merge_regions (struct rsvd_region *rsvd_region, int max)
	for (i = 1; i < max; ++i) {
		if (rsvd_region[i].start >= rsvd_region[i-1].end)
			continue;
		if (rsvd_region[i].end > rsvd_region[i-1].end)
			rsvd_region[i-1].end = rsvd_region[i].end;
		memmove(&rsvd_region[i], &rsvd_region[i+1],
			(max - i) * sizeof(struct rsvd_region));
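/*
 * Worked example (illustrative values): given the reserved regions
 *
 *	{ .start = 0x4000, .end = 0x6000 }
 *	{ .start = 0x1000, .end = 0x3000 }
 *	{ .start = 0x2000, .end = 0x5000 }
 *
 * sort_regions() orders them by start address, and merge_regions() then folds
 * the overlapping entries into a single { 0x1000, 0x6000 } region, returning
 * the new region count (1 here).  This is why callers such as reserve_memory()
 * and setup_crashkernel() always sort before merging.
 */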
/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
				       &bss_resource);

__initcall(register_memory);
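/*
 * Illustrative result (addresses made up): once registered, these resources
 * show up nested under the matching RAM range in /proc/iomem, e.g.
 *
 *	00000000-3fffffff : System RAM
 *	  04000000-049fffff : Kernel code
 *	  04a00000-04d1ffff : Kernel data
 *	  04d20000-04ffffff : Kernel bss
 */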
/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in the 32-bit area), see the implementation of
 * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
 * in the kdump case. See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
 */
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
static void __init setup_crashkernel(unsigned long total, int *n)
	unsigned long long base = 0, size = 0;

	ret = parse_crashkernel(boot_command_line, total,
				&size, &base);
	if (ret == 0 && size > 0) {
		sort_regions(rsvd_region, *n);
		*n = merge_regions(rsvd_region, *n);
		base = kdump_find_rsvd_region(size,
					      rsvd_region, *n);

		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				   "at %ld GB but this is unusable because it "
				   "must\nbe below 4 GB. Change the memory "
				   "configuration of the machine.\n",
				   (unsigned long)(base >> 30));

		printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
		       "for crashkernel (System RAM: %ldMB)\n",
		       (unsigned long)(size >> 20),
		       (unsigned long)(base >> 20),
		       (unsigned long)(total >> 20));
		rsvd_region[*n].start =
			(unsigned long)__va(base);
		rsvd_region[*n].end =
			(unsigned long)__va(base + size);

		crashk_res.start = base;
		crashk_res.end = base + size - 1;

	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
			     ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
			     sizeof(*ia64_boot_param);

static inline void __init setup_crashkernel(unsigned long total, int *n)
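/*
 * Example (illustrative numbers): booting with "crashkernel=512M" on a machine
 * with 16 GB of RAM makes parse_crashkernel() return size = 512 MB and
 * base = 0, kdump_find_rsvd_region() then picks a suitable free range, and the
 * code above logs something like
 *
 *	Reserving 512MB of memory at 2048MB for crashkernel (System RAM: 16384MB)
 *
 * The "crashkernel=<size>@<offset>" form (e.g. crashkernel=512M@2G) requests a
 * fixed base address instead of letting the kernel choose one.
 */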
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
reserve_memory (void)
	unsigned long total_memory;

	/* none of the entries in this table overlap */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param);

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end = (rsvd_region[n].start
			      + strlen(__va(ia64_boot_param->command_line)) + 1);

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end = (unsigned long) ia64_imva(_end);

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;

#ifdef CONFIG_CRASH_DUMP
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end = ~0UL;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);

	/* reserve all regions except the end of memory marker with memblock */
	for (n = 0; n < num_rsvd_regions - 1; n++) {
		struct rsvd_region *region = &rsvd_region[n];
		phys_addr_t addr = __pa(region->start);
		phys_addr_t size = region->end - region->start;

		memblock_reserve(addr, size);
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address. But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO. In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
		       "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
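/*
 * Background sketch (assumption, based on the usual ia64 I/O port scheme, not
 * part of this file): a legacy port number is turned into an MMIO address
 * roughly as
 *
 *	offset = space->sparse ? (((port >> 2) << 12) | (port & 0xfff)) : port;
 *	addr   = space->mmio_base | offset;
 *
 * so marking io_space[0] as sparse here tells the inb()/outb() helpers to use
 * the sparse encoding for the legacy 0-64K port range.
 */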
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	extern int sn_serial_console_early_setup(void);
	if (!sn_serial_console_early_setup())
		earlycons++;
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	return (earlycons) ? 0 : -1;
mark_bsp_online (void)
	/* If we register an early console, allow CPU 0 to printk */
	set_cpu_online(smp_processor_id(), true);

static __initdata int nomca;
static __init int setup_nomca(char *s)
early_param("nomca", setup_nomca);
#ifdef CONFIG_CRASH_DUMP
int __init reserve_elfcorehdr(u64 *start, u64 *end)
	/*
	 * We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */
	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;

#endif /* CONFIG_CRASH_DUMP */
setup_arch (char **cmdline_p)
	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called to ensure
	 * that ia64_mv is initialised before any command line
	 * settings may cause console setup to occur.
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

	/* Initialize the ACPI boot-time table parser */
	early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
#  ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#  endif
	per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
		32 : cpumask_weight(&early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
# endif
#endif /* CONFIG_ACPI */
	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		unsigned long num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

# if defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
	/*
	 * Non-legacy systems may route legacy VGA MMIO range to system
	 * memory. vga_con probes the MMIO hole, so memory looks like
	 * a VGA device to it. The EFI memory map can tell us if it's
	 * memory so we can avoid this problem.
	 */
	if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
		conswitchp = &vga_con;
# endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
	check_sal_cache_flush();
#endif
	clear_sched_clock_stable();
/*
 * Display cpu info for all CPUs.
 */
show_cpuinfo (struct seq_file *m, void *v)
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long proc_freq;

	/* build the feature string: */
	memcpy(features, "standard", 9);

	size = sizeof(features);

	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name),
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);

	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

		   "cpu MHz : %lu.%03lu\n"
		   "itc MHz : %lu.%06lu\n"
		   "BogoMIPS : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);

	seq_printf(m, "siblings : %u\n",
		   cpumask_weight(&cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
			   c->core_id, c->thread_id);
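/*
 * Illustrative /proc/cpuinfo output produced by the code above (values are
 * made up; field order follows the seq_printf() format strings):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	...
 *	features   : branchlong, 16-byte atomic ops
 *	cpu MHz    : 1500.000
 *	itc MHz    : 1500.000000
 *	BogoMIPS   : 2245.59
 *	siblings   : 2
 *	physical id: 0
 */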
c_start (struct seq_file *m, loff_t *pos)
	while (*pos < nr_cpu_ids && !cpu_online(*pos))
		++*pos;
	return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;

c_next (struct seq_file *m, void *v, loff_t *pos)
	++*pos;
	return c_start(m, pos);

c_stop (struct seq_file *m, void *v)

const struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};
static char brandname[MAX_BRANDS][128];

get_model_name(__u8 family, __u8 model)
	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	printk(KERN_ERR
	       "%s: Table overflow. Some processor model information will be missing\n",
	       __func__);
	return "Unknown";
identify_cpu (struct cpuinfo_ia64 *c)
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);

	c->cpu = smp_processor_id();

	/*
	 * These default values will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs.
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;

	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
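/*
 * Worked example using the Itanium defaults above: with impl_va_msb = 50,
 * (1L << 51) - 1 covers the implemented virtual-address bits 0..50 and
 * 7L << 61 covers the region-number bits 61..63, so
 *
 *	unimpl_va_mask = ~((7L << 61) | ((1L << 51) - 1))
 *
 * ends up with exactly bits 51..60 set, i.e. the unimplemented virtual-address
 * bits.  Likewise, with phys_addr_size = 44, unimpl_pa_mask flags physical
 * address bits 44..62 (the 1L << 63 term keeps the top bit out of the mask).
 */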
/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */
	unsigned long line_size, max = 1;
	unsigned long l, levels, unique_caches;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified) = 2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
			       "(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;

		if (cci.pcci_stride < ia64_cache_stride_shift)
			ia64_cache_stride_shift = cci.pcci_stride;

		line_size = 1 << cci.pcci_line_size;

		if (!cci.pcci_unified) {
			/* cache_type (instruction) = 1 */
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
				       "(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;

			if (cci.pcci_stride < ia64_i_cache_stride_shift)
				ia64_i_cache_stride_shift = cci.pcci_stride;

	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
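/*
 * Worked example (illustrative): if PAL reports a 64-byte i-cache stride
 * (pcci_stride = 6) at every level and a 128-byte line size at the outermost
 * level (pcci_line_size = 7), the loop above leaves
 * ia64_i_cache_stride_shift = 6 and, per item 1 of the comment,
 * ia64_max_cacheline_size becomes 128.
 */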
/*
 * cpu_init() initializes state that is per-CPU. This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
	extern void ia64_mmu_init(void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is set up)
	 */
	if (smp_processor_id() == 0) {
		cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
		cpumask_set_cpu(0, &cpu_core_map[0]);
	}

	/*
	 * Set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 * and the alt-dtlb-miss handler can set per-cpu mapping into
	 * the TLB when needed. head.S already did this for cpu0.
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu(). We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
#	define FEATURE_SET 16
	struct ia64_pal_retval iprv;

	if (cpu_info->family == 0x1f) {
		PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
		if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
			PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				      (iprv.v1 | 0x80), FEATURE_SET, 0);
	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes. This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space. This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred. The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses). Turn on
	 * dcr.lc as per recommendation by the architecture team. Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
				       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));

	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));
	/* Clear ITC to eliminate sched_clock() overflows in human time. */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
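	/*
	 * Note (assumption based on the IA-64 interruption architecture):
	 * writing 1 << 16 sets the "mask" bit of these per-CPU vector
	 * registers (ITV, LRR0/1, PMV, CMCV), so the corresponding interrupt
	 * sources stay masked until the relevant code later programs a real
	 * vector and clears the bit.
	 */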
	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();
	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}
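	/*
	 * Worked example: with rid_size = 24 bits, the "- 3" above (each
	 * context consumes eight region IDs, one per virtual region) gives
	 * max_ctx = (1U << 21) - 1 = 0x1fffff; the architected minimum of
	 * 18 RID bits likewise yields (1U << 15) - 1 = 32767 contexts.
	 */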
	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();

	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);

static int __init run_dmi_scan(void)
	dmi_set_dump_stack_arch_desc();

core_initcall(run_dmi_scan);