/*
 * Common EFI memory map functions.
 */
#define pr_fmt(fmt) "efi: " fmt

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/early_ioremap.h>
15 static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
17 return memblock_alloc(size, 0);
20 static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
22 unsigned int order = get_order(size);
23 struct page *p = alloc_pages(GFP_KERNEL, order);
28 return PFN_PHYS(page_to_pfn(p));
32 * efi_memmap_alloc - Allocate memory for the EFI memory map
33 * @num_entries: Number of entries in the allocated map.
35 * Depending on whether mm_init() has already been invoked or not,
36 * either memblock or "normal" page allocation is used.
38 * Returns the physical address of the allocated memory map on
39 * success, zero on failure.
41 phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
43 unsigned long size = num_entries * efi.memmap.desc_size;
45 if (slab_is_available())
46 return __efi_memmap_alloc_late(size);
48 return __efi_memmap_alloc_early(size);
52 * __efi_memmap_init - Common code for mapping the EFI memory map
53 * @data: EFI memory map data
54 * @late: Use early or late mapping function?
56 * This function takes care of figuring out which function to use to
57 * map the EFI memory map in efi.memmap based on how far into the boot
60 * During bootup @late should be %false since we only have access to
61 * the early_memremap*() functions as the vmalloc space isn't setup.
62 * Once the kernel is fully booted we can fallback to the more robust
65 * Returns zero on success, a negative error code on failure.
68 __efi_memmap_init(struct efi_memory_map_data *data, bool late)
70 struct efi_memory_map map;
73 if (efi_enabled(EFI_PARAVIRT))
76 phys_map = data->phys_map;
79 map.map = memremap(phys_map, data->size, MEMREMAP_WB);
81 map.map = early_memremap(phys_map, data->size);
84 pr_err("Could not map the memory map!\n");
88 map.phys_map = data->phys_map;
89 map.nr_map = data->size / data->desc_size;
90 map.map_end = map.map + data->size;
92 map.desc_version = data->desc_version;
93 map.desc_size = data->desc_size;
96 set_bit(EFI_MEMMAP, &efi.flags);
104 * efi_memmap_init_early - Map the EFI memory map data structure
105 * @data: EFI memory map data
107 * Use early_memremap() to map the passed in EFI memory map and assign
110 int __init efi_memmap_init_early(struct efi_memory_map_data *data)
112 /* Cannot go backwards */
113 WARN_ON(efi.memmap.late);
115 return __efi_memmap_init(data, false);
118 void __init efi_memmap_unmap(void)
120 if (!efi_enabled(EFI_MEMMAP))
123 if (!efi.memmap.late) {
126 size = efi.memmap.desc_size * efi.memmap.nr_map;
127 early_memunmap(efi.memmap.map, size);
129 memunmap(efi.memmap.map);
132 efi.memmap.map = NULL;
133 clear_bit(EFI_MEMMAP, &efi.flags);
137 * efi_memmap_init_late - Map efi.memmap with memremap()
138 * @phys_addr: Physical address of the new EFI memory map
139 * @size: Size in bytes of the new EFI memory map
141 * Setup a mapping of the EFI memory map using ioremap_cache(). This
142 * function should only be called once the vmalloc space has been
143 * setup and is therefore not suitable for calling during early EFI
144 * initialise, e.g. in efi_init(). Additionally, it expects
145 * efi_memmap_init_early() to have already been called.
147 * The reason there are two EFI memmap initialisation
148 * (efi_memmap_init_early() and this late version) is because the
149 * early EFI memmap should be explicitly unmapped once EFI
150 * initialisation is complete as the fixmap space used to map the EFI
151 * memmap (via early_memremap()) is a scarce resource.
153 * This late mapping is intended to persist for the duration of
154 * runtime so that things like efi_mem_desc_lookup() and
155 * efi_mem_attributes() always work.
157 * Returns zero on success, a negative error code on failure.
159 int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
161 struct efi_memory_map_data data = {
166 /* Did we forget to unmap the early EFI memmap? */
167 WARN_ON(efi.memmap.map);
169 /* Were we already called? */
170 WARN_ON(efi.memmap.late);
173 * It makes no sense to allow callers to register different
174 * values for the following fields. Copy them out of the
175 * existing early EFI memmap.
177 data.desc_version = efi.memmap.desc_version;
178 data.desc_size = efi.memmap.desc_size;
180 return __efi_memmap_init(&data, true);
184 * efi_memmap_install - Install a new EFI memory map in efi.memmap
185 * @addr: Physical address of the memory map
186 * @nr_map: Number of entries in the memory map
188 * Unlike efi_memmap_init_*(), this function does not allow the caller
189 * to switch from early to late mappings. It simply uses the existing
190 * mapping function and installs the new memmap.
192 * Returns zero on success, a negative error code on failure.
194 int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
196 struct efi_memory_map_data data;
200 data.phys_map = addr;
201 data.size = efi.memmap.desc_size * nr_map;
202 data.desc_version = efi.memmap.desc_version;
203 data.desc_size = efi.memmap.desc_size;
205 return __efi_memmap_init(&data, efi.memmap.late);
209 * efi_memmap_split_count - Count number of additional EFI memmap entries
210 * @md: EFI memory descriptor to split
211 * @range: Address range (start, end) to split around
213 * Returns the number of additional EFI memmap entries required to
216 int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
222 start = md->phys_addr;
223 end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
225 /* modifying range */
226 m_start = range->start;
229 if (m_start <= start) {
230 /* split into 2 parts */
231 if (start < m_end && m_end < end)
235 if (start < m_start && m_start < end) {
236 /* split into 3 parts */
239 /* split into 2 parts */
248 * efi_memmap_insert - Insert a memory region in an EFI memmap
249 * @old_memmap: The existing EFI memory map structure
250 * @buf: Address of buffer to store new map
251 * @mem: Memory map entry to insert
253 * It is suggested that you call efi_memmap_split_count() first
254 * to see how large @buf needs to be.
256 void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
257 struct efi_mem_range *mem)
259 u64 m_start, m_end, m_attr;
260 efi_memory_desc_t *md;
264 /* modifying range */
265 m_start = mem->range.start;
266 m_end = mem->range.end;
267 m_attr = mem->attribute;
270 * The EFI memory map deals with regions in EFI_PAGE_SIZE
271 * units. Ensure that the region described by 'mem' is aligned
274 if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
275 !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
280 for (old = old_memmap->map, new = buf;
281 old < old_memmap->map_end;
282 old += old_memmap->desc_size, new += old_memmap->desc_size) {
284 /* copy original EFI memory descriptor */
285 memcpy(new, old, old_memmap->desc_size);
287 start = md->phys_addr;
288 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
290 if (m_start <= start && end <= m_end)
291 md->attribute |= m_attr;
293 if (m_start <= start &&
294 (start < m_end && m_end < end)) {
296 md->attribute |= m_attr;
297 md->num_pages = (m_end - md->phys_addr + 1) >>
300 new += old_memmap->desc_size;
301 memcpy(new, old, old_memmap->desc_size);
303 md->phys_addr = m_end + 1;
304 md->num_pages = (end - md->phys_addr + 1) >>
308 if ((start < m_start && m_start < end) && m_end < end) {
310 md->num_pages = (m_start - md->phys_addr) >>
313 new += old_memmap->desc_size;
314 memcpy(new, old, old_memmap->desc_size);
316 md->attribute |= m_attr;
317 md->phys_addr = m_start;
318 md->num_pages = (m_end - m_start + 1) >>
321 new += old_memmap->desc_size;
322 memcpy(new, old, old_memmap->desc_size);
324 md->phys_addr = m_end + 1;
325 md->num_pages = (end - m_end) >>
329 if ((start < m_start && m_start < end) &&
332 md->num_pages = (m_start - md->phys_addr) >>
335 new += old_memmap->desc_size;
336 memcpy(new, old, old_memmap->desc_size);
338 md->phys_addr = m_start;
339 md->num_pages = (end - md->phys_addr + 1) >>
341 md->attribute |= m_attr;