// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
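
/*
 * Sizing note (illustrative): the three bookkeeping fields plus the
 * REMAP_SIZE mfn slots add up to P2M_PER_PAGE longs, so the buffer fills
 * exactly one page.  Assuming 4 KiB pages and 8-byte longs, P2M_PER_PAGE
 * is 512 and REMAP_SIZE is therefore 509.
 */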

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
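
/*
 * Illustrative sizing, assuming a 64-byte struct page and 4 KiB pages:
 * each page of extra memory costs 64/4096 (about 1.6%) of its size in
 * struct page entries, so allowing 10x extra on top of 1 GiB of base
 * memory can consume roughly 160 MiB (about 16%) of that base.
 */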

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
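
/*
 * Parse the "xen_512gb_limit" option from the Xen-provided command line.
 * A bare "xen_512gb_limit" enables the limit, "xen_512gb_limit=<bool>"
 * (e.g. "xen_512gb_limit=0") sets it explicitly, and a malformed value is
 * ignored, keeping the CONFIG_XEN_512GB default.
 */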
static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for zero size, it should happen rarely and will
	 * only write a new entry that is regarded as unused due to its zero
	 * size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 entries after min_pfn */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/*
		 * If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping */
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, their VA mappings need to be
	 * zapped.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			native_make_pte(0), 0);

	return remap_pfn;
}
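
/*
 * Callback for xen_foreach_remap_area(): count how many pages of the
 * non-RAM pfn range [start_pfn, end_pfn) lie below nr_pages and thus need
 * to be remapped; remap_pages carries the running total between calls.
 */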
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}
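
/*
 * Walk the Xen-supplied E820 map and invoke func() on each combined run of
 * non-RAM entries and gaps, threading the previous return value through as
 * last_val and returning the final result.
 */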
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows remapping the different chunks in
 * arbitrary order; the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}
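
/*
 * Return the upper bound on the number of pages for this domain: MAXMEM
 * worth of pages, further clamped to 512 GiB for domUs with
 * xen_512gb_limit set.
 */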
static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list which
 * are in conflict with the E820 map that is going to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for the caller's convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_phys_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * multiple of the base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_phys_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}
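
/* Register a callback entry point (event, failsafe, syscall, ...) with Xen. */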
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
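
/*
 * If X86_FEATURE_SYSENTER32 is available, register the compat SYSENTER
 * entry point with Xen; clear the feature bit if registration fails.
 */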
void xen_enable_sysenter(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
}

void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

static void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());