/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *per_cpu_init(void)
{
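	/*
	 * Called on each CPU as it is brought up; only the first call,
	 * on the BSP, populates the percpu areas. Later calls simply
	 * return the calling CPU's own percpu base.
	 */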
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;

	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * so that the APs don't have to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
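
		/*
		 * __per_cpu_offset[cpu] is the delta added to a canonical
		 * percpu symbol address to reach this CPU's copy;
		 * local_per_cpu_offset caches each CPU's own delta in its
		 * percpu area so it can be read without the global array.
		 */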

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu area
		 * for cpu0 is on the correct node and that its virtual
		 * address isn't insanely far from the other percpu areas,
		 * which is important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
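
/*
 * Note: the third argument to __alloc_bootmem() below is the allocation
 * goal; passing __pa(MAX_DMA_ADDRESS) asks the bootmem allocator to
 * place the percpu backing store above the DMA-able region when
 * possible, keeping low memory free for device buffers.
 */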
static inline void
alloc_per_cpu_data(void)
{
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);
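
	/*
	 * Each CPU's unit is a single percpu page: the kernel's static
	 * percpu data first, then PERCPU_MODULE_RESERVE bytes for module
	 * static percpu variables, with the remainder available to the
	 * dynamic percpu allocator.
	 */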

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;
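
	/*
	 * The three sizes above were chosen to exactly fill one percpu
	 * page per CPU; pcpu_setup_first_chunk() sanity-checks this
	 * layout before installing it as the first chunk.
	 */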

	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}

#else /* !CONFIG_SMP */
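/*
 * On UP kernels the boot-time percpu area set up by head.S is used
 * as-is, so there is nothing to allocate here.
 */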
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory(void)
{
	reserve_memory();

	/* first find the highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
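
	/*
	 * Register usable memory: with a virtual mem_map only the ranges
	 * the EFI map reports as usable are recorded; otherwise the whole
	 * [0, max_low_pfn) span is added to memblock as a single node-0
	 * range.
	 */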
#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
#else
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif

	find_initrd();

	alloc_per_cpu_data();
}

/*
 * Set up the page tables.
 */
void __init
paging_init(void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
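
	/*
	 * Zone limits are page frame numbers: when ZONE_DMA32 is
	 * configured, pages below MAX_DMA_ADDRESS land in it, and
	 * everything else goes to ZONE_NORMAL.
	 */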
#ifdef CONFIG_ZONE_DMA32
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
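
	/*
	 * Choose between a flat mem_map and a virtually mapped one: if
	 * the largest hole in the physical address space is smaller than
	 * LARGE_GAP, a pseudo-contiguous mem_map is cheap enough;
	 * otherwise the mem_map is carved out of the top of the vmalloc
	 * area and backed page-by-page only for memory that exists.
	 */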
#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
	} else {
		unsigned long map_size;

		/* allocate the virtual mem_map */
		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
				      sizeof(struct page));
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map() makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();

		printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
	}
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	free_area_init_nodes(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}