/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;
/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
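/*
 * Rough illustration of the ratio (with assumed figures, not values
 * defined in this file): with 4 KiB pages and a struct page of about
 * 64 bytes, the page structures needed for 10x extra memory consume
 * about 10 * 64 / 4096, i.e. roughly 1/6, of the base allocation,
 * which still leaves most of the base memory usable.
 */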
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
		return;
	xen_512gb_limit = val;
}
static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	/*
	 * No need to check for a zero size: that case should happen rarely
	 * and would only write a new entry that is regarded as unused
	 * because of its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
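/*
 * Note: the extra memory regions collected above are only accounted and
 * reserved in memblock at this point; they are expected to be populated
 * later (typically by the Xen balloon driver) once their p2m entries have
 * been marked invalid by xen_inv_extra_mem().
 */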
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}

	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}
/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
		}
	}

	return done;
}
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;
	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
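/*
 * XENMEM_decrease_reservation returns the number of extents actually
 * released, so a return value of 1 means the single frame passed in was
 * handed back to Xen; the caller below warns on any other value.
 */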
/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		xen_released_pages++;
		if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}
/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
	}
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
	}
	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
	}
}
/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
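/*
 * Layout sketch of the remap list built above (illustrative only): each
 * chunk's first frame holds the remap header while the chunk sits on the
 * list, and xen_remap_mfn always points at the most recently added chunk:
 *
 *   xen_remap_mfn --> [ next_area_mfn | target_pfn | size | mfns[] ]
 *                           |
 *                           v
 *                      older chunk --> ... --> INVALID_P2M_ENTRY
 *
 * xen_remap_memory() later walks and consumes this list.
 */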
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820entry *entry = xen_e820_map;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}
/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows remapping the different chunks in
 * arbitrary order while the resulting mapping stays independent of that
 * order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}
static unsigned long __init xen_get_pages_limit(void)
{
#ifdef CONFIG_X86_32
	limit = GB(64) / PAGE_SIZE;
#else
	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;
#endif
	return limit;
}
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}
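/*
 * XENMEM_maximum_reservation reports the domain's maximum reservation in
 * pages. For dom0 this is typically bounded by a dom0_mem= limit on the
 * Xen command line (an assumption about common setups, not something this
 * file states).
 */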
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
static void __init xen_ignore_unusable(void)
{
	struct e820entry *entry = xen_e820_map;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;

	entry = xen_e820_map;
	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
		if (entry->type == E820_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;
	}
	return true;
}
/*
 * Find a free area in physical memory not yet reserved and compliant with
 * E820 map.
 * Used to relocate pre-allocated areas like initrd or p2m list which are in
 * conflict with the to be used E820 map.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}
/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}
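/*
 * The copy above is done in chunks because early_memremap() maps through a
 * small window of boot-time fixmap slots (NR_FIX_BTMAPS pages per mapping),
 * so neither the source nor the destination mapping may exceed that window
 * in a single step.
 */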
/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

#ifdef CONFIG_X86_32
	/*
	 * Relocating the p2m on a 32-bit system to an arbitrary virtual
	 * address is not supported, so just give up.
	 */
	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
	BUG();
#else
	memblock_free(start, size);
#endif
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;

	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
	}
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;
	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size. On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
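	/*
	 * Worked example with made-up numbers (not taken from this file): a
	 * domU booted with 2 GiB (max_pfn = 0x80000 pages) whose static
	 * maximum is 6 GiB (max_pages = 0x180000) arrives here with
	 * extra_pages = 0x100000; min3(10 * 0x80000, 0x100000, 0x100000)
	 * keeps all 4 GiB of extra space because it is well below ten times
	 * the base allocation.
	 */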
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);
	sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */

	xen_reserve_xen_mfnlist();
	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}
/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_map_entries = memmap.nr_entries;

	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	for (i = 0; i < xen_e820_map_entries; i++)
		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
				xen_e820_map[i].type);

	/* Remove p2m info, it is not needed. */
	xen_start_info->mfn_list = 0;
	xen_start_info->first_p2m_pfn = 0;
	xen_start_info->nr_p2m_frames = 0;

	return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask = vdso_image_32.data +
		vdso_image_32.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
void xen_enable_sysenter(void)
{
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif
	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}
/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());

	fiddle_vdso();
}