// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
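
/*
 * Illustrative note (added; not from the original source): each remap chunk
 * page stores three bookkeeping words followed by the saved MFNs, which is
 * why REMAP_SIZE is P2M_PER_PAGE - 3. Assuming 64-bit x86 with 4 KiB pages,
 * P2M_PER_PAGE is PAGE_SIZE / sizeof(unsigned long) = 512, so one chunk page
 * describes up to 509 remapped frames:
 *
 *	+---------------+
 *	| next_area_mfn |  MFN of the next chunk page (list link)
 *	| target_pfn    |  first PFN this chunk is remapped to
 *	| size          |  number of valid entries in mfns[]
 *	| mfns[0..508]  |  original MFNs preserved for the remap
 *	+---------------+
 */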

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}
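
/*
 * Example (added; illustrative): a bare "xen_512gb_limit" on the guest
 * command line enables the limit, while "xen_512gb_limit=<bool>" sets it
 * explicitly, e.g. "xen_512gb_limit=0" disables it. An unparsable value
 * leaves the compile-time default untouched.
 */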

static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}

	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
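
/*
 * Worked example (added; illustrative): with an extra-mem region covering
 * PFNs [0x1000, 0x2000), the call xen_del_extra_mem(0x1800, 0x100) hits the
 * "mid of region" case: the region is trimmed to [0x1000, 0x1800) and the
 * tail [0x1900, 0x2000) is re-added via xen_add_extra_mem().
 */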

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
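
/*
 * Note (added for clarity): XENMEM_decrease_reservation returns the number
 * of extents actually released, so a return value of 1 means the single
 * frame was freed; the callers below treat anything else as a failure.
 */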

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping */
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn((void *)buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
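
/*
 * Worked example (added; illustrative, assuming REMAP_SIZE = 509): a remap
 * of size = 1200 pages produces three chunk pages describing 509, 509 and
 * 182 frames. Each chunk page links to its predecessor via next_area_mfn,
 * so xen_remap_mfn ends up pointing at the most recently written chunk.
 */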

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, their VA mappings need to be
	 * zapped.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			native_make_pte(0), 0);

	return remap_pfn;
}

static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}
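
/*
 * Illustrative (added): for a non-RAM hole spanning PFNs [0xa0, 0x100) in a
 * domain with nr_pages = 0x40000, the helper above adds
 * min(0x100, 0x40000) - 0xa0 = 0x60 pages to the running total.
 */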

static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}
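
/*
 * Usage note (added for clarity): xen_memory_setup() below walks the E820
 * map twice with this helper, first with xen_count_remap_pages() to size
 * the remap target and then with xen_set_identity_and_remap_chunk() to do
 * the actual identity mapping and remap preparation.
 */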

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the to be remapped memory itself in a linked list anchored at
 * xen_remap_mfn. This scheme allows the different chunks to be remapped in
 * arbitrary order while the resulting mapping stays independent of that
 * order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn((void *)buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}
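
/*
 * Note (added for clarity): the pfn_s/len bookkeeping above coalesces
 * chunks whose target ranges are adjacent, so xen_del_extra_mem() is called
 * once per contiguous range rather than once per chunk page.
 */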

static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}
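
/*
 * Background (added; our understanding of the CONFIG_XEN_512GB rationale):
 * Xen tools and crash dump analysis tools may not cope with PV domains
 * larger than 512 GiB, hence the optional clamp below MAXMEM for domUs.
 */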

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}
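
/*
 * Illustrative (added): a RAM region [0x1000200, 0x1003000) is shrunk
 * inward to the page-aligned [0x1001000, 0x1003000) before being added,
 * so no partial page is ever reported as RAM.
 */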

static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map to be used.
 * Returns 0 if no area is found; otherwise returns the physical address
 * of the area, which is already reserved for the caller's convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}
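
/*
 * Note (added for clarity): the copy proceeds in chunks no larger than the
 * early fixmap window (NR_FIX_BTMAPS pages), since this early in boot
 * arbitrary physical memory is only reachable via early_memremap().
 */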

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_phys_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	if (xen_initial_domain()) {
		/*
		 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
		 * regions, so if we're using the machine memory map leave the
		 * region as RAM as it is in the pseudo-physical map.
		 *
		 * UNUSABLE regions in domUs are not handled and will need
		 * a patch in the future.
		 */
		xen_ignore_unusable();

#ifdef CONFIG_ISCSI_IBFT_FIND
		/* Reserve 0.5 MiB to 1 MiB region so iBFT can be found */
		xen_e820_table.entries[xen_e820_table.nr_entries].addr = IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].size = IBFT_END - IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].type = E820_TYPE_RESERVED;
		xen_e820_table.nr_entries++;
#endif
	}

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to EXTRA_MEM_RATIO times the
	 * base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RESERVED)
			xen_pv_pci_possible = true;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_phys_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
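
/*
 * Note (added for clarity): CALLBACKF_mask_events asks Xen to mask event
 * delivery while the callback runs, mirroring how a native interrupt gate
 * clears IF on entry.
 */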

void xen_enable_sysenter(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
}

void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/*
		 * Pretty fatal; 64-bit userspace has no other
		 * mechanism for syscalls.
		 */
	}

	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

static void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
}