// SPDX-License-Identifier: GPL-2.0

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
#include <linux/memblock.h>
#endif
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/panic_notifier.h>

#include <xen/xen.h>
#include <xen/features.h>
#include <xen/interface/sched.h>
#include <xen/interface/version.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/cpu.h>
#include <asm/e820/api.h>
#include <asm/setup.h>

#include "xen-ops.h"

/* The hypercall page itself is defined in xen-head.S; only the export lives here. */
EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to the xen_vcpu_info structure or
 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
 * and xen_vcpu_setup for details. By default it points to
 * shared_info->vcpu_info, but during boot it is switched to point to
 * xen_vcpu_info. The pointer is used in xen_evtchn_do_upcall to
 * acknowledge pending events.
 * Make sure that xen_vcpu_info doesn't cross a page boundary by making it
 * cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
 * which matches the cache line size of 64-bit x86 processors).
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);

/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

__read_mostly bool xen_have_vector_callback = true;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * NB: These need to live in .data or alike because they're used by
 * xen_prepare_pvh() which runs before clearing the bss.
 */
enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);
uint32_t __ro_after_init xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);

/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

static int xen_cpu_up_online(unsigned int cpu)
{
        xen_init_lock_cpu(cpu);
        return 0;
}

int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
                    int (*cpu_dead_cb)(unsigned int))
{
        int rc;

        rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
                                       "x86/xen/guest:prepare",
                                       cpu_up_prepare_cb, cpu_dead_cb);
        if (rc >= 0) {
                rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                               "x86/xen/guest:online",
                                               xen_cpu_up_online, NULL);
                if (rc < 0)
                        cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
        }

        return rc >= 0 ? 0 : rc;
}
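
/*
 * Usage sketch (callback names taken from the PV guest code): the guest
 * init paths register their own prepare/dead callbacks, e.g.
 *      rc = xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv);
 * so that both hotplug states are wired up before any secondary CPU is
 * brought online.
 */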

static void xen_vcpu_setup_restore(int cpu)
{
        /* Any per_cpu(xen_vcpu) is stale, so reset it */
        xen_vcpu_info_reset(cpu);

        /*
         * For PVH and PVHVM, setup online VCPUs only. The rest will
         * be handled by hotplug.
         */
        if (xen_pv_domain() ||
            (xen_hvm_domain() && cpu_online(cpu)))
                xen_vcpu_setup(cpu);
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                bool other_cpu = (cpu != smp_processor_id());
                bool is_up;

                if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID)
                        continue;

                /* Only Xen 4.5 and higher support this. */
                is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up,
                                           xen_vcpu_nr(cpu), NULL) > 0;

                /* Take remote vCPUs down before re-registering vcpu_info. */
                if (other_cpu && is_up &&
                    HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
                        BUG();

                if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
                        xen_setup_runstate_info(cpu);

                xen_vcpu_setup_restore(cpu);

                if (other_cpu && is_up &&
                    HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
                        BUG();
        }
}
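
/*
 * Point per_cpu(xen_vcpu) back at the static vcpu_info slot in the
 * shared info page. Only the first MAX_VIRT_CPUS vCPUs have such a
 * slot; any later vCPU must use VCPUOP_register_vcpu_info instead
 * (see xen_vcpu_setup() below).
 */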
void xen_vcpu_info_reset(int cpu)
{
        if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) {
                per_cpu(xen_vcpu, cpu) =
                        &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
        } else {
                /* Set to NULL so that if somebody accesses it we get an OOPS */
                per_cpu(xen_vcpu, cpu) = NULL;
        }
}

void xen_vcpu_setup(int cpu)
{
        struct vcpu_register_vcpu_info info;
        int err;
        struct vcpu_info *vcpup;

        BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        /*
         * This path is called on PVHVM at bootup (xen_hvm_smp_prepare_boot_cpu)
         * and at restore (xen_vcpu_restore). Also called for hotplugged
         * VCPUs (cpu_init -> xen_hvm_cpu_prepare_hvm).
         * However, the hypercall can only be done once (see below) so if a VCPU
         * is offlined and comes back online then let's not redo the hypercall.
         *
         * For PV it is called during restore (xen_vcpu_restore) and bootup
         * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
         * use this function.
         */
        if (xen_hvm_domain()) {
                if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
                        return;
        }

        vcpup = &per_cpu(xen_vcpu_info, cpu);
        info.mfn = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);

        /*
         * N.B. This hypercall can _only_ be called once per CPU.
         * Subsequent calls will error out with -EINVAL. This is due to
         * the fact that the hypervisor has no unregister variant and this
         * hypercall does not allow to over-write info.mfn and info.offset.
         */
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
                                 &info);
        if (err)
                panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err);

        per_cpu(xen_vcpu, cpu) = vcpup;
}
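
/*
 * XENVER_version packs the Xen major version into the upper 16 bits of
 * its return value and the minor version into the lower 16 bits; e.g.
 * 0x00040011 decodes as Xen 4.17. xen_banner() and
 * xen_running_on_version_or_later() below rely on this encoding.
 */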
void __init xen_banner(void)
{
        unsigned int version = HYPERVISOR_xen_version(XENVER_version, NULL);
        struct xen_extraversion extra;

        HYPERVISOR_xen_version(XENVER_extraversion, &extra);

        pr_info("Booting kernel on %s\n", pv_info.name);
        pr_info("Xen version: %u.%u%s%s\n",
                version >> 16, version & 0xffff, extra.extraversion,
                xen_feature(XENFEAT_mmu_pt_update_preserve_ad)
                ? " (preserve-AD)" : "");
}

/* Check if running on Xen version (major, minor) or later */
bool xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
        unsigned int version;

        if (!xen_domain())
                return false;

        version = HYPERVISOR_xen_version(XENVER_version, NULL);
        if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
            ((version >> 16) > major))
                return true;

        return false;
}
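
/*
 * Illustrative use (hypothetical caller): gating a feature that only
 * Xen 4.5 and newer provide would look like
 *      if (xen_running_on_version_or_later(4, 5))
 *              enable_the_feature();
 */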

/*
 * add_preferred_console() makes the most recently added entry the
 * preferred console, so "tty" is only preferred over "hvc" when a VGA
 * console is actually present.
 */
void __init xen_add_preferred_consoles(void)
{
        add_preferred_console("xenboot", 0, NULL);
        if (!boot_params.screen_info.orig_video_isVGA)
                add_preferred_console("tty", 0, NULL);
        add_preferred_console("hvc", 0, NULL);
        if (boot_params.screen_info.orig_video_isVGA)
                add_preferred_console("tty", 0, NULL);
}
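
/*
 * Ask Xen to shut this domain down. "reason" is one of the SHUTDOWN_*
 * codes from xen/interface/sched.h (SHUTDOWN_reboot, SHUTDOWN_crash,
 * ...); vPMU state is torn down on every online CPU first.
 */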
void xen_reboot(int reason)
{
        struct sched_shutdown r = { .reason = reason };
        int cpu;

        for_each_online_cpu(cpu)
                xen_pmu_finish(cpu);

        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
}

static int reboot_reason = SHUTDOWN_reboot;
static bool xen_legacy_crash;

void xen_emergency_restart(void)
{
        xen_reboot(reboot_reason);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        if (!kexec_crash_loaded()) {
                if (xen_legacy_crash)
                        xen_reboot(SHUTDOWN_crash);

                reboot_reason = SHUTDOWN_crash;

                /*
                 * If panic_timeout==0 then we are supposed to wait forever.
                 * However, to preserve original dom0 behavior we have to drop
                 * into hypervisor. (domU behavior is controlled by its
                 * config file.)
                 */
                if (panic_timeout == 0)
                        panic_timeout = -1;
        }

        return NOTIFY_DONE;
}
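
/*
 * "xen_legacy_crash" on the kernel command line restores the old
 * behavior of reporting SHUTDOWN_crash to the hypervisor right away on
 * panic, instead of deferring via reboot_reason/panic_timeout.
 */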
static int __init parse_xen_legacy_crash(char *arg)
{
        xen_legacy_crash = true;

        return 0;
}
early_param("xen_legacy_crash", parse_xen_legacy_crash);

static struct notifier_block xen_panic_block = {
        .notifier_call = xen_panic_event,
        .priority = INT_MIN
};

int xen_panic_handler_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
        return 0;
}
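
/*
 * Hook for x86_platform.pin_vcpu: pin the calling vCPU to physical CPU
 * "cpu" via SCHEDOP_pin_override; a negative cpu removes the override
 * again. Pinning is permanently disabled after a hard failure.
 */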
void xen_pin_vcpu(int cpu)
{
        static bool disable_pinning;
        struct sched_pin_override pin_override;
        int ret;

        if (disable_pinning)
                return;

        pin_override.pcpu = cpu;
        ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override);

        /* Ignore errors when removing override. */
        if (cpu < 0)
                return;

        switch (ret) {
        case -ENOSYS:
                pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n",
                        cpu);
                disable_pinning = true;
                break;
        case -EPERM:
                WARN(1, "Trying to pin vcpu without having privilege to do so\n");
                disable_pinning = true;
                break;
        case -EINVAL:
        case -EBUSY:
                pr_warn("Physical cpu %d not available for pinning. Check Xen cpu configuration.\n",
                        cpu);
                break;
        case 0:
                break;
        default:
                WARN(1, "rc %d while trying to pin vcpu\n", ret);
                disable_pinning = true;
        }
}

#ifdef CONFIG_HOTPLUG_CPU
void xen_arch_register_cpu(int num)
{
        arch_register_cpu(num);
}
EXPORT_SYMBOL(xen_arch_register_cpu);

void xen_arch_unregister_cpu(int num)
{
        arch_unregister_cpu(num);
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
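
/*
 * Record [start_pfn, start_pfn + n_pfns) as extra memory: merge it with
 * an adjacent region when possible, and reserve it in memblock so it is
 * not handed out before the balloon driver can make it usable.
 */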
void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)
{
        unsigned int i;

        /*
         * No need to check for a zero size: it should happen rarely and
         * will only write a new entry that is still regarded as unused
         * due to its zero size.
         */
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                /* Add new region. */
                if (xen_extra_mem[i].n_pfns == 0) {
                        xen_extra_mem[i].start_pfn = start_pfn;
                        xen_extra_mem[i].n_pfns = n_pfns;
                        break;
                }
                /* Append to existing region. */
                if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
                    start_pfn) {
                        xen_extra_mem[i].n_pfns += n_pfns;
                        break;
                }
        }
        if (i == XEN_EXTRA_MEM_MAX_REGIONS)
                printk(KERN_WARNING "Warning: not enough extra memory regions\n");

        memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}