// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */
#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))
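/*
 * Sizing note (illustrative): a vcpu can have one pending SIGP emergency
 * per possible sender, i.e. up to KVM_MAX_VCPUS of them, plus at most
 * LOCAL_IRQS other local interrupts, so KVM_S390_GET_IRQ_STATE never
 * needs more than VCPU_IRQS_MAX_BUF bytes. This is a worst-case bound,
 * not a tunable.
 */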
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward),
	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
	STATS_DESC_COUNTER(VM, gmap_shadow_create),
	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");
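/*
 * Example (hedged): "modprobe kvm hpage=1" enables 1m backing. Huge page
 * backing is mutually exclusive with nested virtualization (checked at
 * module init) and with CMMA/ucontrol guests, see
 * kvm_vm_ioctl_enable_cap() below.
 */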
/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa  = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
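/*
 * Example (hedged): "modprobe kvm diag9c_forwarding_hz=50" caps how many
 * diag 0x9c directed yields per second are forwarded to the underlying
 * host; the default of 0 keeps forwarding off and such yields are only
 * counted (see the diag_9c_ignored stat above).
 */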
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16
/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
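/*
 * Worked example for the multi-precision math above (values illustrative):
 * for a TOD jump of delta = 2, delta becomes -2 == 0xfffffffffffffffe and
 * delta_idx = -1. Adding delta to scb->epoch wraps unless epoch was 0 or 1,
 * and exactly in the wrapping case "scb->epoch < delta" holds, so the +1
 * carry cancels the -1 in epdx; in the non-wrapping case the -1 stays as
 * the borrow, keeping the 128-bit (epdx:epoch) value consistent.
 */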
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}
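/*
 * Hedged note: GR0 bit 0x100 selects the PLO "test bit" (query) variant,
 * which only sets the condition code to report whether function number
 * 'nr' is installed; cc == 0 therefore means "available" and no actual
 * compare-and-load/swap operation is performed.
 */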
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}
#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939
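/*
 * Both SORTL and DFLTCC share the CPACF-style query convention assumed by
 * __insn32_query() above: function code 0 in GR0 means "query" and GR1
 * points to the buffer that receives the installed-subfunction bitmap.
 * The .insn encoding avoids depending on assembler support for the
 * mnemonics themselves.
 */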
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto out;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}
void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
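/*
 * Minimal userspace sketch (hedged, error handling omitted):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 *
 * s390_enable_sie() converts the calling process' page tables to the
 * pgste format needed by SIE, which is why it has to run before any
 * guest memory is set up in that address space.
 */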
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}
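/*
 * Userspace probes all of the above through KVM_CHECK_EXTENSION, e.g.
 * (hedged sketch, error handling omitted):
 *
 *	int n = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
 *
 * 0 means "unsupported"; positive values are capability specific
 * (a count, a size limit such as MEM_OP_MAX_SIZE, or a bit mask such
 * as KVM_GUESTDBG_VALID_MASK).
 */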
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
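/*
 * Note: _PAGE_ENTRIES page-table entries map one 1 MB segment, so the
 * loop above synchronizes dirty state at the same segment granularity
 * that gmap_sync_dirty_log_pmd() works with when huge pages back the
 * guest.
 */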
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;
		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;
		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;
		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;
		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}
static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If host is configured for PCI and the necessary facilities are
	 * available, turn on interpretation for the life of this guest
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}
/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}
/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}
static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}
static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
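/*
 * Migration mode is driven from userspace via the KVM_SET_DEVICE_ATTR vm
 * ioctl, e.g. (hedged sketch):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr = KVM_S390_VM_MIGRATION_START,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * KVM_GET_DEVICE_ATTR with KVM_S390_VM_MIGRATION_STATUS reads the mode
 * back through the function above.
 */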
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
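/*
 * The "gtod->tod < clk.tod" check above is carry detection for the
 * extended TOD: the unsigned 64-bit addition of the epoch can only
 * yield a value smaller than clk.tod if it wrapped, in which case the
 * epoch index (the high part of the extended TOD) must be incremented
 * to absorb the carry.
 */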
static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}
static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}
static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}
/**
 * kvm_s390_update_topology_change_report - update CPU topology change report
 * @kvm: guest KVM description
 * @val: set or clear the MTCR bit
 *
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 * the guest with a topology change.
 * This is only relevant if the topology facility is present.
 *
 * The SCA version, bsca or esca, doesn't matter as offset is the same.
 */
static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
{
	union sca_utility new, old;
	struct bsca_block *sca;

	read_lock(&kvm->arch.sca_lock);
	sca = kvm->arch.sca;
	do {
		old = READ_ONCE(sca->utility);
		new = old;
		new.mtcr = val;
	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
}
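/*
 * Locking note: sca_lock is taken as a reader only to pin the SCA against
 * being replaced (e.g. by sca_switch_to_extended()); concurrent MTCR
 * writers serialize against each other through the cmpxchg() retry loop
 * rather than through the lock.
 */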
static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
					       struct kvm_device_attr *attr)
{
	if (!test_kvm_facility(kvm, 11))
		return -ENXIO;

	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
	return 0;
}
static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
					       struct kvm_device_attr *attr)
{
	u8 topo;

	if (!test_kvm_facility(kvm, 11))
		return -ENXIO;

	read_lock(&kvm->arch.sca_lock);
	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
	read_unlock(&kvm->arch.sca_lock);

	return put_user(topo, (u8 __user *)attr->addr);
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_set_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_get_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);
out:
	kvfree(keys);
	return r;
}
/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size
 * of two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
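/*
 * Worked example (clarifying note): on a 64-bit build the distance is
 * 2 * 8 = 16 pages. If the next dirty page is at most 16 pages away, it is
 * cheaper to emit the clean one-byte values in between than to terminate the
 * block, because every new block costs another start_gfn/count header (and
 * another ioctl round trip) in the kvm_s390_cmma_log exchange.
 */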
static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}
static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
						     gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, true);
}
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
	unsigned long ofs = cur_gfn - ms->base_gfn;
	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		mnode = rb_next(mnode);
		/* If we are above the highest slot, wrap around */
		if (!mnode)
			mnode = rb_first(&slots->gfn_tree);

		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = 0;
	}

	if (cur_gfn < ms->base_gfn)
		ofs = 0;

	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
	}
	return ms->base_gfn + ofs;
}
2169 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2170 u8 *res, unsigned long bufsize)
2172 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2173 struct kvm_memslots *slots = kvm_memslots(kvm);
2174 struct kvm_memory_slot *ms;
	if (unlikely(kvm_memslots_empty(slots)))
		return 0;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = kvm_s390_get_gfn_end(slots);

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}
2221 * This function searches for the next page with dirty CMMA attributes, and
2222 * saves the attributes in the buffer up to either the end of the buffer or
2223 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2224 * no trailing clean bytes are saved.
2225 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2226 * output buffer will indicate 0 as length.
2228 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2229 struct kvm_s390_cmma_log *args)
2231 unsigned long bufsize;
2232 int srcu_idx, peek, ret;
2235 if (!kvm->arch.use_cmma)
2237 /* Invalid/unsupported flags were specified */
2238 if (args->flags & ~KVM_S390_CMMA_PEEK)
2240 /* Migration mode query, and we are not doing a migration */
2241 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2242 if (!peek && !kvm->arch.migration_mode)
2244 /* CMMA is disabled or was not used, or the buffer has length zero */
2245 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2246 if (!bufsize || !kvm->mm->context.uses_cmm) {
2247 memset(args, 0, sizeof(*args));
2250 /* We are not peeking, and there are no dirty pages */
2251 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2252 memset(args, 0, sizeof(*args));
2256 values = vmalloc(bufsize);
2260 mmap_read_lock(kvm->mm);
2261 srcu_idx = srcu_read_lock(&kvm->srcu);
2263 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2265 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2266 srcu_read_unlock(&kvm->srcu, srcu_idx);
2267 mmap_read_unlock(kvm->mm);
2269 if (kvm->arch.migration_mode)
2270 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2272 args->remaining = 0;
	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
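/*
 * Example (illustrative userspace sketch, not built here; vm_fd and buffer
 * setup are assumed): during migration a VMM would drain the dirty CMMA
 * values in a loop and later replay them on the target via
 * KVM_S390_SET_CMMA_BITS:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = 4096,
 *		.flags = 0,
 *		.values = (__u64)buf,
 *	};
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
 *			break;
 *		// consume log.count values starting at guest frame
 *		// log.start_gfn, then continue after them
 *		log.start_gfn += log.count;
 *		log.count = 4096;
 *	} while (log.remaining);
 */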
2282 * This function sets the CMMA attributes for the given pages. If the input
2283 * buffer has zero length, no action is taken, otherwise the attributes are
2284 * set and the mm->context.uses_cmm flag is set.
2286 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2287 const struct kvm_s390_cmma_log *args)
2289 unsigned long hva, mask, pgstev, i;
2291 int srcu_idx, r = 0;
2295 if (!kvm->arch.use_cmma)
2297 /* invalid/unsupported flags */
2298 if (args->flags != 0)
2300 /* Enforce sane limit on memory allocation */
2301 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2304 if (args->count == 0)
2307 bits = vmalloc(array_size(sizeof(*bits), args->count));
2311 r = copy_from_user(bits, (void __user *)args->values, args->count);
2317 mmap_read_lock(kvm->mm);
2318 srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
2331 srcu_read_unlock(&kvm->srcu, srcu_idx);
2332 mmap_read_unlock(kvm->mm);
2334 if (!kvm->mm->context.uses_cmm) {
2335 mmap_write_lock(kvm->mm);
2336 kvm->mm->context.uses_cmm = 1;
2337 mmap_write_unlock(kvm->mm);
/**
 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
 * non protected.
 * @kvm: the VM whose protected vCPUs are to be converted
 * @rc: return value for the RC field of the UVC (in case of error)
 * @rrc: return value for the RRC field of the UVC (in case of error)
 *
 * Does not stop in case of error, tries to convert as many
 * CPUs as possible. In case of error, the RC and RRC of the last error are
 * reported in @rc and @rrc.
 *
 * Return: 0 in case of success, otherwise -EIO
 */
2357 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2359 struct kvm_vcpu *vcpu;
2365 * We ignore failures and try to destroy as many CPUs as possible.
2366 * At the same time we must not free the assigned resources when
2367 * this fails, as the ultravisor has still access to that memory.
2368 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2370 * We want to return the first failure rc and rrc, though.
2372 kvm_for_each_vcpu(i, vcpu, kvm) {
2373 mutex_lock(&vcpu->mutex);
		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
			*rc = _rc;
			*rrc = _rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
2381 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2383 kvm_s390_gisa_enable(kvm);
/**
 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
 * to protected.
 * @kvm: the VM whose protected vCPUs are to be converted
 * @rc: return value for the RC field of the UVC (in case of error)
 * @rrc: return value for the RRC field of the UVC (in case of error)
 *
 * Tries to undo the conversion in case of error.
 *
 * Return: 0 in case of success, otherwise -EIO
 */
2398 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2404 struct kvm_vcpu *vcpu;
2406 /* Disable the GISA if the ultravisor does not support AIV. */
2407 if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
2408 kvm_s390_gisa_disable(kvm);
2410 kvm_for_each_vcpu(i, vcpu, kvm) {
2411 mutex_lock(&vcpu->mutex);
2412 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2413 mutex_unlock(&vcpu->mutex);
2418 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2423 * Here we provide user space with a direct interface to query UV
2424 * related data like UV maxima and available features as well as
2425 * feature specific data.
2427 * To facilitate future extension of the data structures we'll try to
2428 * write data up to the maximum requested length.
2430 static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2434 switch (info->header.id) {
2435 case KVM_PV_INFO_VM: {
2436 len_min = sizeof(info->header) + sizeof(info->vm);
2438 if (info->header.len_max < len_min)
2441 memcpy(info->vm.inst_calls_list,
2442 uv_info.inst_calls_list,
2443 sizeof(uv_info.inst_calls_list));
2445 /* It's max cpuid not max cpus, so it's off by one */
2446 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2447 info->vm.max_guests = uv_info.max_num_sec_conf;
2448 info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2449 info->vm.feature_indication = uv_info.uv_feature_indications;
2453 case KVM_PV_INFO_DUMP: {
2454 len_min = sizeof(info->header) + sizeof(info->dump);
2456 if (info->header.len_max < len_min)
2459 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2460 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
		return len_min;
	}
	default:
		return -EINVAL;
	}
}
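/*
 * Example (illustrative userspace sketch; vm_fd and error handling are
 * assumed): querying the VM info block through the KVM_S390_PV_COMMAND
 * ioctl:
 *
 *	struct kvm_s390_pv_info info = {
 *		.header.id = KVM_PV_INFO_VM,
 *		.header.len_max = sizeof(info.header) + sizeof(info.vm),
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd = KVM_PV_INFO,
 *		.data = (__u64)&info,
 *	};
 *	if (!ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
 *		printf("max PV guests: %llu\n", info.vm.max_guests);
 */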
2469 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2470 struct kvm_s390_pv_dmp dmp)
2473 void __user *result_buff = (void __user *)dmp.buff_addr;
2475 switch (dmp.subcmd) {
2476 case KVM_PV_DUMP_INIT: {
2477 if (kvm->arch.pv.dumping)
2481 * Block SIE entry as concurrent dump UVCs could lead
2484 kvm_s390_vcpu_block_all(kvm);
2486 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2487 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2488 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2491 kvm->arch.pv.dumping = true;
2493 kvm_s390_vcpu_unblock_all(kvm);
2498 case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2499 if (!kvm->arch.pv.dumping)
2503 * gaddr is an output parameter since we might stop
2504 * early. As dmp will be copied back in our caller, we
2505 * don't need to do it ourselves.
2507 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2508 &cmd->rc, &cmd->rrc);
2511 case KVM_PV_DUMP_COMPLETE: {
2512 if (!kvm->arch.pv.dumping)
2516 if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2519 r = kvm_s390_pv_dump_complete(kvm, result_buff,
2520 &cmd->rc, &cmd->rrc);
2531 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2535 void __user *argp = (void __user *)cmd->data;
2538 case KVM_PV_ENABLE: {
2540 if (kvm_s390_pv_is_protected(kvm))
2544 * FMT 4 SIE needs esca. As we never switch back to bsca from
2545 * esca, we need no cleanup in the error cases below
2547 r = sca_switch_to_extended(kvm);
2551 mmap_write_lock(current->mm);
2552 r = gmap_mark_unmergeable();
2553 mmap_write_unlock(current->mm);
2557 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2561 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2563 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2565 /* we need to block service interrupts from now on */
2566 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2569 case KVM_PV_DISABLE: {
2571 if (!kvm_s390_pv_is_protected(kvm))
2574 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2576 * If a CPU could not be destroyed, destroy VM will also fail.
2577 * There is no point in trying to destroy it. Instead return
2578 * the rc and rrc from the first CPU that failed destroying.
2582 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
2584 /* no need to block service interrupts any more */
2585 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2588 case KVM_PV_SET_SEC_PARMS: {
2589 struct kvm_s390_pv_sec_parm parms = {};
2593 if (!kvm_s390_pv_is_protected(kvm))
2597 if (copy_from_user(&parms, argp, sizeof(parms)))
2600 /* Currently restricted to 8KB */
2602 if (parms.length > PAGE_SIZE * 2)
2606 hdr = vmalloc(parms.length);
2611 if (!copy_from_user(hdr, (void __user *)parms.origin,
2613 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2614 &cmd->rc, &cmd->rrc);
2619 case KVM_PV_UNPACK: {
2620 struct kvm_s390_pv_unp unp = {};
2623 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2627 if (copy_from_user(&unp, argp, sizeof(unp)))
2630 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2631 &cmd->rc, &cmd->rrc);
2634 case KVM_PV_VERIFY: {
2636 if (!kvm_s390_pv_is_protected(kvm))
2639 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2640 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2641 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2645 case KVM_PV_PREP_RESET: {
2647 if (!kvm_s390_pv_is_protected(kvm))
2650 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2651 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2652 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2656 case KVM_PV_UNSHARE_ALL: {
2658 if (!kvm_s390_pv_is_protected(kvm))
2661 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2662 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2663 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2668 struct kvm_s390_pv_info info = {};
2672 * No need to check the VM protection here.
2674 * Maybe user space wants to query some of the data
2675 * when the VM is still unprotected. If we see the
2676 * need to fence a new data command we can still
2677 * return an error in the info handler.
2681 if (copy_from_user(&info, argp, sizeof(info.header)))
2685 if (info.header.len_max < sizeof(info.header))
2688 data_len = kvm_s390_handle_pv_info(&info);
2694 * If a data command struct is extended (multiple
2695 * times) this can be used to determine how much of it
2698 info.header.len_written = data_len;
2701 if (copy_to_user(argp, &info, data_len))
2708 struct kvm_s390_pv_dmp dmp;
2711 if (!kvm_s390_pv_is_protected(kvm))
2715 if (copy_from_user(&dmp, argp, sizeof(dmp)))
2718 r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2722 if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2735 static bool access_key_invalid(u8 access_key)
	return access_key > 0xf;
}
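/*
 * Background note (summary of the architecture, added for clarity): the
 * 4-bit access key is compared against the ACC field of the storage key
 * protecting the page, so values above 0xf cannot exist. The full storage
 * key byte holds ACC (4 bits), fetch protection (F), reference (R) and
 * change (C), with the lowest-order bit reserved - which is also why
 * kvm_s390_set_skeys() above rejects keys with bit 0 set.
 */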
2740 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2742 void __user *uaddr = (void __user *)mop->buf;
2743 u64 supported_flags;
2744 void *tmpbuf = NULL;
2747 supported_flags = KVM_S390_MEMOP_F_SKEY_PROTECTION
2748 | KVM_S390_MEMOP_F_CHECK_ONLY;
	if (mop->flags & ~supported_flags || !mop->size)
		return -EINVAL;
	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;
2754 * This is technically a heuristic only, if the kvm->lock is not
2755 * taken, it is not guaranteed that the vm is/remains non-protected.
2756 * This is ok from a kernel perspective, wrongdoing is detected
2757 * on the access, -EFAULT is returned and the vm may crash the
2758 * next time it accesses the memory in question.
2759 * There is no sane usecase to do switching and a memop on two
2760 * different CPUs at the same time.
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
		if (access_key_invalid(mop->key))
			return -EINVAL;
	} else {
		mop->key = 0;
	}
2770 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
2776 srcu_idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
		r = PGM_ADDRESSING;
		goto out_unlock;
	}
2784 case KVM_S390_MEMOP_ABSOLUTE_READ: {
2785 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2786 r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_FETCH, mop->key);
2788 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2789 mop->size, GACC_FETCH, mop->key);
2791 if (copy_to_user(uaddr, tmpbuf, mop->size))
2797 case KVM_S390_MEMOP_ABSOLUTE_WRITE: {
2798 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2799 r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_STORE, mop->key);
2801 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2805 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2806 mop->size, GACC_STORE, mop->key);
out_unlock:
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	vfree(tmpbuf);
	return r;
}
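/*
 * Example (illustrative userspace sketch; vm_fd, gpa, len and buf are
 * assumed): an absolute read of guest memory with storage-key checking. A
 * positive return value from the ioctl is a program interruption code, e.g.
 * PGM_PROTECTION for a key-protection violation:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = gpa,
 *		.size = len,
 *		.buf = (__u64)buf,
 *		.op = KVM_S390_MEMOP_ABSOLUTE_READ,
 *		.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION,
 *		.key = 3,
 *	};
 *	int r = ioctl(vm_fd, KVM_S390_MEM_OP, &op);
 */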
2821 long kvm_arch_vm_ioctl(struct file *filp,
2822 unsigned int ioctl, unsigned long arg)
2824 struct kvm *kvm = filp->private_data;
2825 void __user *argp = (void __user *)arg;
2826 struct kvm_device_attr attr;
2830 case KVM_S390_INTERRUPT: {
2831 struct kvm_s390_interrupt s390int;
2834 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2836 r = kvm_s390_inject_vm(kvm, &s390int);
2839 case KVM_CREATE_IRQCHIP: {
2840 struct kvm_irq_routing_entry routing;
2843 if (kvm->arch.use_irqchip) {
2844 /* Set up dummy routing. */
2845 memset(&routing, 0, sizeof(routing));
2846 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
2850 case KVM_SET_DEVICE_ATTR: {
2852 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2854 r = kvm_s390_vm_set_attr(kvm, &attr);
2857 case KVM_GET_DEVICE_ATTR: {
2859 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2861 r = kvm_s390_vm_get_attr(kvm, &attr);
2864 case KVM_HAS_DEVICE_ATTR: {
2866 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2868 r = kvm_s390_vm_has_attr(kvm, &attr);
2871 case KVM_S390_GET_SKEYS: {
2872 struct kvm_s390_skeys args;
2875 if (copy_from_user(&args, argp,
2876 sizeof(struct kvm_s390_skeys)))
2878 r = kvm_s390_get_skeys(kvm, &args);
2881 case KVM_S390_SET_SKEYS: {
2882 struct kvm_s390_skeys args;
2885 if (copy_from_user(&args, argp,
2886 sizeof(struct kvm_s390_skeys)))
2888 r = kvm_s390_set_skeys(kvm, &args);
2891 case KVM_S390_GET_CMMA_BITS: {
2892 struct kvm_s390_cmma_log args;
2895 if (copy_from_user(&args, argp, sizeof(args)))
2897 mutex_lock(&kvm->slots_lock);
2898 r = kvm_s390_get_cmma_bits(kvm, &args);
2899 mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
2907 case KVM_S390_SET_CMMA_BITS: {
2908 struct kvm_s390_cmma_log args;
2911 if (copy_from_user(&args, argp, sizeof(args)))
2913 mutex_lock(&kvm->slots_lock);
2914 r = kvm_s390_set_cmma_bits(kvm, &args);
2915 mutex_unlock(&kvm->slots_lock);
2918 case KVM_S390_PV_COMMAND: {
2919 struct kvm_pv_cmd args;
2921 /* protvirt means user cpu state */
2922 kvm_s390_set_user_cpu_state_ctrl(kvm);
2924 if (!is_prot_virt_host()) {
2928 if (copy_from_user(&args, argp, sizeof(args))) {
2936 mutex_lock(&kvm->lock);
2937 r = kvm_s390_handle_pv(kvm, &args);
2938 mutex_unlock(&kvm->lock);
2939 if (copy_to_user(argp, &args, sizeof(args))) {
2945 case KVM_S390_MEM_OP: {
2946 struct kvm_s390_mem_op mem_op;
2948 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vm_mem_op(kvm, &mem_op);
		else
			r = -EFAULT;
		break;
	}
2954 case KVM_S390_ZPCI_OP: {
2955 struct kvm_s390_zpci_op args;
2958 if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
2960 if (copy_from_user(&args, argp, sizeof(args))) {
2964 r = kvm_s390_pci_zpci_op(kvm, &args);
2974 static int kvm_s390_apxa_installed(void)
2976 struct ap_config_info info;
2978 if (ap_instructions_available()) {
2979 if (ap_qci(&info) == 0)
2987 * The format of the crypto control block (CRYCB) is specified in the 3 low
2988 * order bits of the CRYCB designation (CRYCBD) field as follows:
2989 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2990 * AP extended addressing (APXA) facility are installed.
2991 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2992 * Format 2: Both the APXA and MSAX3 facilities are installed
2994 static void kvm_s390_set_crycb_format(struct kvm *kvm)
2996 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2998 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2999 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3001 /* Check whether MSAX3 is installed */
3002 if (!test_kvm_facility(kvm, 76))
3005 if (kvm_s390_apxa_installed())
3006 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3008 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3012 * kvm_arch_crypto_set_masks
3014 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3016 * @apm: the mask identifying the accessible AP adapters
3017 * @aqm: the mask identifying the accessible AP domains
3018 * @adm: the mask identifying the accessible AP control domains
3020 * Set the masks that identify the adapters, domains and control domains to
3021 * which the KVM guest is granted access.
3023 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3026 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3027 unsigned long *aqm, unsigned long *adm)
3029 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3031 kvm_s390_vcpu_block_all(kvm);
3033 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3034 case CRYCB_FORMAT2: /* APCB1 use 256 bits */
3035 memcpy(crycb->apcb1.apm, apm, 32);
3036 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3037 apm[0], apm[1], apm[2], apm[3]);
3038 memcpy(crycb->apcb1.aqm, aqm, 32);
3039 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3040 aqm[0], aqm[1], aqm[2], aqm[3]);
3041 memcpy(crycb->apcb1.adm, adm, 32);
3042 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through both use APCB0 */
3047 memcpy(crycb->apcb0.apm, apm, 8);
3048 memcpy(crycb->apcb0.aqm, aqm, 2);
3049 memcpy(crycb->apcb0.adm, adm, 2);
3050 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3051 apm[0], *((unsigned short *)aqm),
3052 *((unsigned short *)adm));
3054 default: /* Can not happen */
3058 /* recreate the shadow crycb for each vcpu */
3059 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3060 kvm_s390_vcpu_unblock_all(kvm);
3062 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3065 * kvm_arch_crypto_clear_masks
3067 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3070 * Clear the masks that identify the adapters, domains and control domains to
3071 * which the KVM guest is granted access.
3073 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3076 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3078 kvm_s390_vcpu_block_all(kvm);
3080 memset(&kvm->arch.crypto.crycb->apcb0, 0,
3081 sizeof(kvm->arch.crypto.crycb->apcb0));
3082 memset(&kvm->arch.crypto.crycb->apcb1, 0,
3083 sizeof(kvm->arch.crypto.crycb->apcb1));
3085 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3086 /* recreate the shadow crycb for each vcpu */
3087 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3088 kvm_s390_vcpu_unblock_all(kvm);
3090 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
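/*
 * Example (illustrative sketch only; the caller and device numbers are
 * assumed, not taken from this file): an AP pass-through driver could grant
 * a guest adapters 0-1 in domain 6 by setting the leftmost-justified mask
 * bits before calling in, holding kvm->lock as required above:
 *
 *	DECLARE_BITMAP(apm, 256) = { 0 };
 *	DECLARE_BITMAP(aqm, 256) = { 0 };
 *	DECLARE_BITMAP(adm, 256) = { 0 };
 *
 *	set_bit_inv(0, apm);
 *	set_bit_inv(1, apm);
 *	set_bit_inv(6, aqm);
 *	set_bit_inv(6, adm);
 *	mutex_lock(&kvm->lock);
 *	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
 *	mutex_unlock(&kvm->lock);
 */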
3092 static u64 kvm_s390_get_initial_cpuid(void)
3097 cpuid.version = 0xff;
3098 return *((u64 *) &cpuid);
3101 static void kvm_s390_crypto_init(struct kvm *kvm)
3103 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3104 kvm_s390_set_crycb_format(kvm);
3105 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3107 if (!test_kvm_facility(kvm, 76))
3110 /* Enable AES/DEA protected key functions by default */
3111 kvm->arch.crypto.aes_kw = 1;
3112 kvm->arch.crypto.dea_kw = 1;
3113 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3114 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3115 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3116 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3119 static void sca_dispose(struct kvm *kvm)
3121 if (kvm->arch.use_esca)
3122 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3124 free_page((unsigned long)(kvm->arch.sca));
3125 kvm->arch.sca = NULL;
3128 void kvm_arch_free_vm(struct kvm *kvm)
3130 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3131 kvm_s390_pci_clear_list(kvm);
3133 __kvm_arch_free_vm(kvm);
3136 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3138 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
3140 char debug_name[16];
3141 static unsigned long sca_offset;
3144 #ifdef CONFIG_KVM_S390_UCONTROL
3145 if (type & ~KVM_VM_S390_UCONTROL)
3147 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3154 rc = s390_enable_sie();
3160 if (!sclp.has_64bscao)
3161 alloc_flags |= GFP_DMA;
3162 rwlock_init(&kvm->arch.sca_lock);
3163 /* start with basic SCA */
3164 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3167 mutex_lock(&kvm_lock);
3169 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3171 kvm->arch.sca = (struct bsca_block *)
3172 ((char *) kvm->arch.sca + sca_offset);
3173 mutex_unlock(&kvm_lock);
3175 sprintf(debug_name, "kvm-%u", current->pid);
3177 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3181 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3182 kvm->arch.sie_page2 =
3183 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3184 if (!kvm->arch.sie_page2)
3187 kvm->arch.sie_page2->kvm = kvm;
3188 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3190 for (i = 0; i < kvm_s390_fac_size(); i++) {
3191 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3192 (kvm_s390_fac_base[i] |
3193 kvm_s390_fac_ext[i]);
3194 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3195 kvm_s390_fac_base[i];
3197 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3199 /* we are always in czam mode - even on pre z14 machines */
3200 set_kvm_facility(kvm->arch.model.fac_mask, 138);
3201 set_kvm_facility(kvm->arch.model.fac_list, 138);
3202 /* we emulate STHYI in kvm */
3203 set_kvm_facility(kvm->arch.model.fac_mask, 74);
3204 set_kvm_facility(kvm->arch.model.fac_list, 74);
3205 if (MACHINE_HAS_TLB_GUEST) {
3206 set_kvm_facility(kvm->arch.model.fac_mask, 147);
3207 set_kvm_facility(kvm->arch.model.fac_list, 147);
3210 if (css_general_characteristics.aiv && test_facility(65))
3211 set_kvm_facility(kvm->arch.model.fac_mask, 65);
3213 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3214 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3216 kvm_s390_crypto_init(kvm);
3218 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3219 mutex_lock(&kvm->lock);
3220 kvm_s390_pci_init_list(kvm);
3221 kvm_s390_vcpu_pci_enable_interp(kvm);
3222 mutex_unlock(&kvm->lock);
3225 mutex_init(&kvm->arch.float_int.ais_lock);
3226 spin_lock_init(&kvm->arch.float_int.lock);
3227 for (i = 0; i < FIRQ_LIST_COUNT; i++)
3228 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3229 init_waitqueue_head(&kvm->arch.ipte_wq);
3230 mutex_init(&kvm->arch.ipte_mutex);
3232 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3233 VM_EVENT(kvm, 3, "vm created with type %lu", type);
3235 if (type & KVM_VM_S390_UCONTROL) {
3236 kvm->arch.gmap = NULL;
3237 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3239 if (sclp.hamax == U64_MAX)
3240 kvm->arch.mem_limit = TASK_SIZE_MAX;
3242 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3244 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
3247 kvm->arch.gmap->private = kvm;
3248 kvm->arch.gmap->pfault_enabled = 0;
3251 kvm->arch.use_pfmfi = sclp.has_pfmfi;
3252 kvm->arch.use_skf = sclp.has_skey;
3253 spin_lock_init(&kvm->arch.start_stop_lock);
3254 kvm_s390_vsie_init(kvm);
3256 kvm_s390_gisa_init(kvm);
3257 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
3261 free_page((unsigned long)kvm->arch.sie_page2);
3262 debug_unregister(kvm->arch.dbf);
3264 KVM_EVENT(3, "creation of vm failed: %d", rc);
3268 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3272 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3273 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3274 kvm_s390_clear_local_irqs(vcpu);
3275 kvm_clear_async_pf_completion_queue(vcpu);
3276 if (!kvm_is_ucontrol(vcpu->kvm))
3278 kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3280 if (kvm_is_ucontrol(vcpu->kvm))
3281 gmap_remove(vcpu->arch.gmap);
3283 if (vcpu->kvm->arch.use_cmma)
3284 kvm_s390_vcpu_unsetup_cmma(vcpu);
3285 /* We can not hold the vcpu mutex here, we are already dying */
3286 if (kvm_s390_pv_cpu_get_handle(vcpu))
3287 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3288 free_page((unsigned long)(vcpu->arch.sie_block));
3291 void kvm_arch_destroy_vm(struct kvm *kvm)
3295 kvm_destroy_vcpus(kvm);
3297 kvm_s390_gisa_destroy(kvm);
3299 * We are already at the end of life and kvm->lock is not taken.
3300 * This is ok as the file descriptor is closed by now and nobody
3301 * can mess with the pv state. To avoid lockdep_assert_held from
3302 * complaining we do not use kvm_s390_pv_is_protected.
3304 if (kvm_s390_pv_get_handle(kvm))
3305 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
3307 * Remove the mmu notifier only when the whole KVM VM is torn down,
3308 * and only if one was registered to begin with. If the VM is
3309 * currently not protected, but has been previously been protected,
3310 * then it's possible that the notifier is still registered.
3312 if (kvm->arch.pv.mmu_notifier.ops)
3313 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3315 debug_unregister(kvm->arch.dbf);
3316 free_page((unsigned long)kvm->arch.sie_page2);
3317 if (!kvm_is_ucontrol(kvm))
3318 gmap_remove(kvm->arch.gmap);
3319 kvm_s390_destroy_adapters(kvm);
3320 kvm_s390_clear_float_irqs(kvm);
3321 kvm_s390_vsie_destroy(kvm);
3322 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3325 /* Section: vcpu related */
3326 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3328 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;
3336 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3338 if (!kvm_s390_use_sca_entries())
3340 read_lock(&vcpu->kvm->arch.sca_lock);
3341 if (vcpu->kvm->arch.use_esca) {
3342 struct esca_block *sca = vcpu->kvm->arch.sca;
3344 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3345 sca->cpu[vcpu->vcpu_id].sda = 0;
3347 struct bsca_block *sca = vcpu->kvm->arch.sca;
3349 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3350 sca->cpu[vcpu->vcpu_id].sda = 0;
3352 read_unlock(&vcpu->kvm->arch.sca_lock);
3355 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3357 if (!kvm_s390_use_sca_entries()) {
3358 struct bsca_block *sca = vcpu->kvm->arch.sca;
3360 /* we still need the basic sca for the ipte control */
3361 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
3362 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
3365 read_lock(&vcpu->kvm->arch.sca_lock);
3366 if (vcpu->kvm->arch.use_esca) {
3367 struct esca_block *sca = vcpu->kvm->arch.sca;
3369 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
3370 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
3371 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
3372 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3373 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3375 struct bsca_block *sca = vcpu->kvm->arch.sca;
3377 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
3378 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
3379 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
3380 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3382 read_unlock(&vcpu->kvm->arch.sca_lock);
3385 /* Basic SCA to Extended SCA data copy routines */
3386 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
3389 d->sigp_ctrl.c = s->sigp_ctrl.c;
3390 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
3393 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
3397 d->ipte_control = s->ipte_control;
3399 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
3400 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
3403 static int sca_switch_to_extended(struct kvm *kvm)
3405 struct bsca_block *old_sca = kvm->arch.sca;
3406 struct esca_block *new_sca;
3407 struct kvm_vcpu *vcpu;
3408 unsigned long vcpu_idx;
3411 if (kvm->arch.use_esca)
3414 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3418 scaoh = (u32)((u64)(new_sca) >> 32);
3419 scaol = (u32)(u64)(new_sca) & ~0x3fU;
3421 kvm_s390_vcpu_block_all(kvm);
3422 write_lock(&kvm->arch.sca_lock);
3424 sca_copy_b_to_e(new_sca, old_sca);
3426 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
3427 vcpu->arch.sie_block->scaoh = scaoh;
3428 vcpu->arch.sie_block->scaol = scaol;
3429 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3431 kvm->arch.sca = new_sca;
3432 kvm->arch.use_esca = 1;
3434 write_unlock(&kvm->arch.sca_lock);
3435 kvm_s390_vcpu_unblock_all(kvm);
3437 free_page((unsigned long)old_sca);
3439 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
3440 old_sca, kvm->arch.sca);
3444 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3448 if (!kvm_s390_use_sca_entries()) {
3449 if (id < KVM_MAX_VCPUS)
3453 if (id < KVM_S390_BSCA_CPU_SLOTS)
3455 if (!sclp.has_esca || !sclp.has_64bscao)
3458 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
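/*
 * Capacity note (added for clarity): a basic SCA has 64 CPU slots
 * (KVM_S390_BSCA_CPU_SLOTS) while an extended SCA has 248
 * (KVM_S390_ESCA_CPU_SLOTS). The first vCPU id beyond the basic range
 * therefore triggers the one-way switch in sca_switch_to_extended(),
 * provided the SCLP facilities (esca, 64bscao) allow it.
 */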
3463 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3464 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3466 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3467 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3468 vcpu->arch.cputm_start = get_tod_clock_fast();
3469 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3472 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3473 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3475 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3476 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3477 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3478 vcpu->arch.cputm_start = 0;
3479 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3482 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3483 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3485 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3486 vcpu->arch.cputm_enabled = true;
3487 __start_cpu_timer_accounting(vcpu);
3490 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3491 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3493 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3494 __stop_cpu_timer_accounting(vcpu);
3495 vcpu->arch.cputm_enabled = false;
3498 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3500 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3501 __enable_cpu_timer_accounting(vcpu);
3505 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3507 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3508 __disable_cpu_timer_accounting(vcpu);
3512 /* set the cpu timer - may only be called from the VCPU thread itself */
3513 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3515 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3516 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3517 if (vcpu->arch.cputm_enabled)
3518 vcpu->arch.cputm_start = get_tod_clock_fast();
3519 vcpu->arch.sie_block->cputm = cputm;
3520 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3524 /* update and get the cpu timer - can also be called from other VCPU threads */
3525 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3530 if (unlikely(!vcpu->arch.cputm_enabled))
3531 return vcpu->arch.sie_block->cputm;
3533 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3535 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3537 * If the writer would ever execute a read in the critical
3538 * section, e.g. in irq context, we have a deadlock.
3540 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3541 value = vcpu->arch.sie_block->cputm;
3542 /* if cputm_start is 0, accounting is being started/stopped */
3543 if (likely(vcpu->arch.cputm_start))
3544 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
3550 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3553 gmap_enable(vcpu->arch.enabled_gmap);
3554 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3555 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3556 __start_cpu_timer_accounting(vcpu);
3560 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3563 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3564 __stop_cpu_timer_accounting(vcpu);
3565 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3566 vcpu->arch.enabled_gmap = gmap_get_enabled();
3567 gmap_disable(vcpu->arch.enabled_gmap);
3571 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3573 mutex_lock(&vcpu->kvm->lock);
3575 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3576 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3578 mutex_unlock(&vcpu->kvm->lock);
3579 if (!kvm_is_ucontrol(vcpu->kvm)) {
3580 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3583 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3584 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3585 /* make vcpu_load load the right gmap on the first trigger */
3586 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3589 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3591 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3592 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3597 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3599 /* At least one ECC subfunction must be present */
3600 return kvm_has_pckmo_subfunc(kvm, 32) ||
3601 kvm_has_pckmo_subfunc(kvm, 33) ||
3602 kvm_has_pckmo_subfunc(kvm, 34) ||
3603 kvm_has_pckmo_subfunc(kvm, 40) ||
3604 kvm_has_pckmo_subfunc(kvm, 41);
3608 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3611 * If the AP instructions are not being interpreted and the MSAX3
3612 * facility is not configured for the guest, there is nothing to set up.
3614 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3617 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3618 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3619 vcpu->arch.sie_block->eca &= ~ECA_APIE;
3620 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3622 if (vcpu->kvm->arch.crypto.apie)
3623 vcpu->arch.sie_block->eca |= ECA_APIE;
3625 /* Set up protected key support */
3626 if (vcpu->kvm->arch.crypto.aes_kw) {
3627 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3628 /* ecc is also wrapped with AES key */
3629 if (kvm_has_pckmo_ecc(vcpu->kvm))
3630 vcpu->arch.sie_block->ecd |= ECD_ECC;
3633 if (vcpu->kvm->arch.crypto.dea_kw)
3634 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3637 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3639 free_page(vcpu->arch.sie_block->cbrlo);
3640 vcpu->arch.sie_block->cbrlo = 0;
3643 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3645 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
3646 if (!vcpu->arch.sie_block->cbrlo)
3651 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3653 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3655 vcpu->arch.sie_block->ibc = model->ibc;
3656 if (test_kvm_facility(vcpu->kvm, 7))
3657 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
3660 static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3665 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3669 if (test_kvm_facility(vcpu->kvm, 78))
3670 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3671 else if (test_kvm_facility(vcpu->kvm, 8))
3672 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3674 kvm_s390_vcpu_setup_model(vcpu);
3676 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3677 if (MACHINE_HAS_ESOP)
3678 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3679 if (test_kvm_facility(vcpu->kvm, 9))
3680 vcpu->arch.sie_block->ecb |= ECB_SRSI;
3681 if (test_kvm_facility(vcpu->kvm, 11))
3682 vcpu->arch.sie_block->ecb |= ECB_PTF;
3683 if (test_kvm_facility(vcpu->kvm, 73))
3684 vcpu->arch.sie_block->ecb |= ECB_TE;
3685 if (!kvm_is_ucontrol(vcpu->kvm))
3686 vcpu->arch.sie_block->ecb |= ECB_SPECI;
3688 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3689 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3690 if (test_kvm_facility(vcpu->kvm, 130))
3691 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3692 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3694 vcpu->arch.sie_block->eca |= ECA_CEI;
3696 vcpu->arch.sie_block->eca |= ECA_IB;
3698 vcpu->arch.sie_block->eca |= ECA_SII;
3699 if (sclp.has_sigpif)
3700 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3701 if (test_kvm_facility(vcpu->kvm, 129)) {
3702 vcpu->arch.sie_block->eca |= ECA_VX;
3703 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3705 if (test_kvm_facility(vcpu->kvm, 139))
3706 vcpu->arch.sie_block->ecd |= ECD_MEF;
3707 if (test_kvm_facility(vcpu->kvm, 156))
3708 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3709 if (vcpu->arch.sie_block->gd) {
3710 vcpu->arch.sie_block->eca |= ECA_AIV;
3711 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3712 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3714 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3716 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
3719 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3721 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3723 if (vcpu->kvm->arch.use_cmma) {
3724 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3728 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3729 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3731 vcpu->arch.sie_block->hpid = HPID_KVM;
3733 kvm_s390_vcpu_crypto_setup(vcpu);
3735 kvm_s390_vcpu_pci_setup(vcpu);
3737 mutex_lock(&vcpu->kvm->lock);
3738 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3739 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3741 kvm_s390_vcpu_unsetup_cmma(vcpu);
	mutex_unlock(&vcpu->kvm->lock);

	return rc;
}
3748 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3750 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3755 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3757 struct sie_page *sie_page;
3760 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3761 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3765 vcpu->arch.sie_block = &sie_page->sie_block;
3766 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3768 /* the real guest size will always be smaller than msl */
3769 vcpu->arch.sie_block->mso = 0;
3770 vcpu->arch.sie_block->msl = sclp.hamax;
3772 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3773 spin_lock_init(&vcpu->arch.local_int.lock);
3774 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3775 seqcount_init(&vcpu->arch.cputm_seqcount);
3777 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3778 kvm_clear_async_pf_completion_queue(vcpu);
3779 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3786 kvm_s390_set_prefix(vcpu, 0);
3787 if (test_kvm_facility(vcpu->kvm, 64))
3788 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3789 if (test_kvm_facility(vcpu->kvm, 82))
3790 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3791 if (test_kvm_facility(vcpu->kvm, 133))
3792 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3793 if (test_kvm_facility(vcpu->kvm, 156))
3794 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3795 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3796 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3803 if (kvm_is_ucontrol(vcpu->kvm)) {
3804 rc = __kvm_ucontrol_vcpu_init(vcpu);
3806 goto out_free_sie_block;
3809 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3810 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3811 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3813 rc = kvm_s390_vcpu_setup(vcpu);
3815 goto out_ucontrol_uninit;
3817 kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3820 out_ucontrol_uninit:
3821 if (kvm_is_ucontrol(vcpu->kvm))
3822 gmap_remove(vcpu->arch.gmap);
3824 free_page((unsigned long)(vcpu->arch.sie_block));
3828 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3830 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
3831 return kvm_s390_vcpu_has_irq(vcpu, 0);
3834 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3836 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3839 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
3845 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
3847 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3850 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
3856 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3858 return atomic_read(&vcpu->arch.sie_block->prog20) &
3859 (PROG_BLOCK_SIE | PROG_REQUEST);
3862 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3864 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3868 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
3869 * If the CPU is not running (e.g. waiting as idle) the function will
3870 * return immediately. */
3871 void exit_sie(struct kvm_vcpu *vcpu)
3873 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3874 kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
3879 /* Kick a guest cpu out of SIE to process a request synchronously */
3880 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
3882 __kvm_make_request(req, vcpu);
3883 kvm_s390_vcpu_request(vcpu);
3886 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3889 struct kvm *kvm = gmap->private;
3890 struct kvm_vcpu *vcpu;
3891 unsigned long prefix;
3894 if (gmap_is_shadow(gmap))
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
3899 kvm_for_each_vcpu(i, vcpu, kvm) {
3900 /* match against both prefix pages */
3901 prefix = kvm_s390_get_prefix(vcpu);
3902 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3903 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3905 kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
3910 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3912 /* do not poll with more than halt_poll_max_steal percent of steal time */
3913 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3914 READ_ONCE(halt_poll_max_steal)) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}
3921 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3923 /* kvm common code refers to this, but never calls it */
3928 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3929 struct kvm_one_reg *reg)
3934 case KVM_REG_S390_TODPR:
3935 r = put_user(vcpu->arch.sie_block->todpr,
3936 (u32 __user *)reg->addr);
3938 case KVM_REG_S390_EPOCHDIFF:
3939 r = put_user(vcpu->arch.sie_block->epoch,
3940 (u64 __user *)reg->addr);
3942 case KVM_REG_S390_CPU_TIMER:
3943 r = put_user(kvm_s390_get_cpu_timer(vcpu),
3944 (u64 __user *)reg->addr);
3946 case KVM_REG_S390_CLOCK_COMP:
3947 r = put_user(vcpu->arch.sie_block->ckc,
3948 (u64 __user *)reg->addr);
3950 case KVM_REG_S390_PFTOKEN:
3951 r = put_user(vcpu->arch.pfault_token,
3952 (u64 __user *)reg->addr);
3954 case KVM_REG_S390_PFCOMPARE:
3955 r = put_user(vcpu->arch.pfault_compare,
3956 (u64 __user *)reg->addr);
3958 case KVM_REG_S390_PFSELECT:
3959 r = put_user(vcpu->arch.pfault_select,
3960 (u64 __user *)reg->addr);
3962 case KVM_REG_S390_PP:
3963 r = put_user(vcpu->arch.sie_block->pp,
3964 (u64 __user *)reg->addr);
3966 case KVM_REG_S390_GBEA:
3967 r = put_user(vcpu->arch.sie_block->gbea,
3968 (u64 __user *)reg->addr);
3977 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3978 struct kvm_one_reg *reg)
3984 case KVM_REG_S390_TODPR:
3985 r = get_user(vcpu->arch.sie_block->todpr,
3986 (u32 __user *)reg->addr);
3988 case KVM_REG_S390_EPOCHDIFF:
3989 r = get_user(vcpu->arch.sie_block->epoch,
3990 (u64 __user *)reg->addr);
3992 case KVM_REG_S390_CPU_TIMER:
3993 r = get_user(val, (u64 __user *)reg->addr);
3995 kvm_s390_set_cpu_timer(vcpu, val);
3997 case KVM_REG_S390_CLOCK_COMP:
3998 r = get_user(vcpu->arch.sie_block->ckc,
3999 (u64 __user *)reg->addr);
4001 case KVM_REG_S390_PFTOKEN:
4002 r = get_user(vcpu->arch.pfault_token,
4003 (u64 __user *)reg->addr);
4004 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4005 kvm_clear_async_pf_completion_queue(vcpu);
4007 case KVM_REG_S390_PFCOMPARE:
4008 r = get_user(vcpu->arch.pfault_compare,
4009 (u64 __user *)reg->addr);
4011 case KVM_REG_S390_PFSELECT:
4012 r = get_user(vcpu->arch.pfault_select,
4013 (u64 __user *)reg->addr);
4015 case KVM_REG_S390_PP:
4016 r = get_user(vcpu->arch.sie_block->pp,
4017 (u64 __user *)reg->addr);
4019 case KVM_REG_S390_GBEA:
4020 r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
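/*
 * Example (illustrative userspace sketch; vcpu_fd and error handling are
 * assumed): reading and rewinding the CPU timer through the ONE_REG
 * interface handled above:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *	val -= 1000;
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */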
4030 static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4032 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4033 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4034 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
4036 kvm_clear_async_pf_completion_queue(vcpu);
4037 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
4038 kvm_s390_vcpu_stop(vcpu);
4039 kvm_s390_clear_local_irqs(vcpu);
4042 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
4044 /* Initial reset is a superset of the normal reset */
4045 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
4048 * This equals initial cpu reset in pop, but we don't switch to ESA.
4049 * We do not only reset the internal data, but also ...
4051 vcpu->arch.sie_block->gpsw.mask = 0;
4052 vcpu->arch.sie_block->gpsw.addr = 0;
4053 kvm_s390_set_prefix(vcpu, 0);
4054 kvm_s390_set_cpu_timer(vcpu, 0);
4055 vcpu->arch.sie_block->ckc = 0;
4056 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4057 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4058 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4060 /* ... the data in sync regs */
4061 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4062 vcpu->run->s.regs.ckc = 0;
4063 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4064 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4065 vcpu->run->psw_addr = 0;
4066 vcpu->run->psw_mask = 0;
4067 vcpu->run->s.regs.todpr = 0;
4068 vcpu->run->s.regs.cputm = 0;
4069 vcpu->run->s.regs.ckc = 0;
4070 vcpu->run->s.regs.pp = 0;
4071 vcpu->run->s.regs.gbea = 1;
4072 vcpu->run->s.regs.fpc = 0;
4074 * Do not reset these registers in the protected case, as some of
4075 * them are overlayed and they are not accessible in this case
4078 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4079 vcpu->arch.sie_block->gbea = 1;
4080 vcpu->arch.sie_block->pp = 0;
4081 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4082 vcpu->arch.sie_block->todpr = 0;
4086 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4088 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4090 /* Clear reset is a superset of the initial reset */
4091 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));
4099 regs->etoken_extension = 0;
4102 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4110 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4118 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4119 struct kvm_sregs *sregs)
4123 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4124 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4130 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4131 struct kvm_sregs *sregs)
4135 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4136 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4142 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4148 vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4159 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4170 fpu->fpc = vcpu->run->s.regs.fpc;
4176 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4180 if (!is_vcpu_stopped(vcpu))
4183 vcpu->run->psw_mask = psw.mask;
4184 vcpu->run->psw_addr = psw.addr;
4189 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4190 struct kvm_translation *tr)
4192 return -EINVAL; /* not implemented yet */
4195 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4196 KVM_GUESTDBG_USE_HW_BP | \
4197 KVM_GUESTDBG_ENABLE)
4199 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4200 struct kvm_guest_debug *dbg)
4206 vcpu->guest_debug = 0;
4207 kvm_s390_clear_bp_data(vcpu);
	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}

	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}
4218 if (dbg->control & KVM_GUESTDBG_ENABLE) {
4219 vcpu->guest_debug = dbg->control;
4220 /* enforce guest PER */
4221 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4223 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4224 rc = kvm_s390_import_bp_data(vcpu, dbg);
4226 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4227 vcpu->arch.guestdbg.last_bp = 0;
4231 vcpu->guest_debug = 0;
4232 kvm_s390_clear_bp_data(vcpu);
4233 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4241 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4242 struct kvm_mp_state *mp_state)
4248 /* CHECK_STOP and LOAD are not supported yet */
4249 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4250 KVM_MP_STATE_OPERATING;
4256 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4257 struct kvm_mp_state *mp_state)
4263 /* user space knows about this interface - let it control the state */
4264 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4266 switch (mp_state->mp_state) {
4267 case KVM_MP_STATE_STOPPED:
4268 rc = kvm_s390_vcpu_stop(vcpu);
4270 case KVM_MP_STATE_OPERATING:
4271 rc = kvm_s390_vcpu_start(vcpu);
4273 case KVM_MP_STATE_LOAD:
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
4280 case KVM_MP_STATE_CHECK_STOP:
		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
4290 static bool ibs_enabled(struct kvm_vcpu *vcpu)
4292 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * If the guest prefix changed, re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
		int rc;

		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

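/*
 * The guest TOD is kept as an offset: guest_tod = host_tod + epoch. For
 * example, a guest clock 16 units ahead of the host yields epoch = 16;
 * with the multiple-epoch facility (139) a borrow out of the 64-bit
 * subtraction additionally decrements the epoch index (epdx). All VCPUs
 * are blocked while the per-vcpu copies are updated so that they observe
 * a consistent clock.
 */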
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	union tod_clock clk;
	unsigned long i;

	preempt_disable();

	store_tod_clock_ext(&clk);

	kvm->arch.epoch = gtod->tod - clk.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
}

int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
	if (!mutex_trylock(&kvm->lock))
		return 0;
	__kvm_s390_set_tod_clock(kvm, gtod);
	mutex_unlock(&kvm->lock);
	return 1;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

	return true;
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

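/*
 * Decide whether a host page fault may be handled asynchronously via the
 * pfault mechanism: the guest must have established a valid pfault token,
 * the PSW must match the agreed select/compare bits, external interrupts
 * and the service-signal subclass must be enabled, and no interrupt may
 * already be pending; otherwise the caller falls back to a synchronous
 * fault-in.
 */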
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	if (psw_extint_disabled(vcpu))
		return false;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return false;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!vcpu->arch.gmap->pfault_enabled)
		return false;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		vcpu->stat.pfault_sync++;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

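/*
 * The main run loop: deliver interrupts and requests (vcpu_pre_run), drop
 * the SRCU lock, enter SIE, and evaluate the exit (vcpu_post_run) until a
 * signal, a pending debug exit, or a nonzero return code breaks the loop.
 * For protected (PV) guests the general purpose registers are staged in
 * the sie_page's pv_grregs around each SIE entry, since their real values
 * are only exposed by the ultravisor for instruction emulation.
 */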
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	kvm_vcpu_srcu_read_lock(vcpu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		kvm_vcpu_srcu_read_unlock(vcpu);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}
		if (test_cpu_flag(CIF_FPU))
			load_fpu_regs();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		kvm_vcpu_srcu_read_lock(vcpu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	kvm_vcpu_srcu_read_unlock(vcpu);
	return rc;
}

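/*
 * sync_regs()/store_regs() implement the lazy register switch between the
 * userspace (host) view and the guest view: host access and floating
 * point/vector registers are saved and replaced by the guest's on entry to
 * KVM_RUN, and switched back on return to userspace. Format-2 state is
 * handled in the _fmt2 variants and skipped for protected guests, where
 * the ultravisor owns most of that state.
 */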
static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */
}

static void sync_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example we must not inject interrupts after specific exits
		 * (e.g. 112 prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * only accept the condition code from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
		preempt_enable();
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

static void store_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
		store_regs_fmt2(vcpu);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	int rc;

	/*
	 * Running a VM while dumping always has the potential to
	 * produce inconsistent dump data. But for PV vcpus a SIE
	 * entry while dumping could also lead to a fatal validity
	 * intercept which we absolutely want to avoid.
	 */
	if (vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	/*
	 * no need to check the return value of vcpu_start as it can only have
	 * an error for protvirt, but protvirt implies user-controlled cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

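/*
 * Helpers for toggling IBS: each helper first cancels an outstanding
 * request for the opposite state, then queues its own via
 * kvm_s390_sync_request(), which also kicks the target VCPU so the
 * request is processed before the next SIE entry. Enabling is skipped
 * entirely when the SCLP does not announce the facility.
 */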
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the operating state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * The real PSW might have changed due to a RESTART interpreted by the
	 * ultravisor. We block all interrupts and let the next sie exit
	 * refresh our view.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the stopped state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/*
	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
	 * have been fully processed. This will ensure that the VCPU
	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	kvm_s390_clear_stop_irq(vcpu);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);

		if (!is_vcpu_stopped(tmp)) {
			started_vcpus++;
			started_vcpu = tmp;
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;
	if (!kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
				 mop->sida_offset), mop->size))
			r = -EFAULT;

		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
				   mop->sida_offset), uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}

static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r = 0;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY
				    | KVM_S390_MEMOP_F_SKEY_PROTECTION;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;
	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;
	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
		if (access_key_invalid(mop->key))
			return -EINVAL;
	} else {
		mop->key = 0;
	}
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
					    GACC_FETCH, mop->key);
			break;
		}
		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					mop->size, mop->key);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
					    GACC_STORE, mop->key);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					 mop->size, mop->key);
		break;
	}

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
				     struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_vcpu_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_vcpu_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}

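/*
 * Illustrative userspace sketch (not kernel code): reading 256 bytes of
 * guest logical memory through KVM_S390_MEM_OP on a vcpu fd. Struct and
 * flag names are from <linux/kvm.h>; "vcpu_fd" and "buffer" are assumed
 * to exist, and error handling is elided.
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr	= 0x1000,			// guest logical address
 *		.buf	= (__u64)(unsigned long)buffer,	// userspace buffer
 *		.size	= 256,
 *		.op	= KVM_S390_MEMOP_LOGICAL_READ,
 *		.ar	= 0,				// access register number
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		perror("KVM_S390_MEM_OP");
 */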
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

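/*
 * KVM_PV_DUMP / KVM_PV_DUMP_CPU: hand the ultravisor-provided dump data
 * for one protected VCPU back to userspace. The UV return codes (rc/rrc)
 * in the command structure are copied back even on failure, so userspace
 * can diagnose rejected dump requests.
 */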
static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
					struct kvm_pv_cmd *cmd)
{
	struct kvm_s390_pv_dmp dmp;
	void *data;
	int ret;

	/* Dump initialization is a prerequisite */
	if (!vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
		return -EFAULT;

	/* We only handle this subcmd right now */
	if (dmp.subcmd != KVM_PV_DUMP_CPU)
		return -EINVAL;

	/* CPU dump length is the same as create cpu storage donation. */
	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
		return -EINVAL;

	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
		   vcpu->vcpu_id, cmd->rc, cmd->rrc);

	if (ret)
		ret = -EINVAL;

	/* On success copy over the dump data */
	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
		ret = -EFAULT;

	kvfree(data);
	return ret;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}
		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}
		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_PV_CPU_COMMAND: {
		struct kvm_pv_cmd cmd;

		r = -EINVAL;
		if (!is_prot_virt_host())
			break;

		r = -EFAULT;
		if (copy_from_user(&cmd, argp, sizeof(cmd)))
			break;

		r = -EINVAL;
		if (cmd.flags)
			break;

		/* We only handle this cmd right now */
		if (cmd.cmd != KVM_PV_DUMP)
			break;

		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);

		/* Always copy over UV rc / rrc data */
		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
			r = -EFAULT;
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	gpa_t size;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
		/*
		 * A few sanity checks. Memory slots have to start and end at
		 * a segment boundary (1MB). The memory in userland may be
		 * fragmented into various different vmas. It is okay to mmap()
		 * and munmap() stuff in this slot after doing this call at any
		 * time.
		 */
		if (new->userspace_addr & 0xffffful)
			return -EINVAL;

		size = new->npages * PAGE_SIZE;
		if (size & 0xffffful)
			return -EINVAL;

		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
			return -EINVAL;
	}

	if (!kvm->arch.migration_mode)
		return 0;

	/*
	 * Turn off migration mode when:
	 * - userspace creates a new memslot with dirty logging off,
	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
	 *   dirty logging is turned off.
	 * Migration mode expects dirty page logging being enabled to store
	 * its dirty bitmap.
	 */
	if (change != KVM_MR_DELETE &&
	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		WARN(kvm_s390_vm_stop_migration(kvm),
		     "Failed to stop migration mode");

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
				      new->base_gfn * PAGE_SIZE,
				      new->npages * PAGE_SIZE);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

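/*
 * sclp.hmfai holds one 2-bit field per facility-list doubleword. For
 * doubleword i, nonhyp_mask() expands field value n into a mask of the
 * 48 - n * 16 low-order bits (the topmost 16 bits of each doubleword are
 * never passed through); module init below ANDs this with the host's
 * stfle facility bits before merging them into kvm_s390_fac_base.
 */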
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

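/*
 * Module init: refuse to load without SIE support (sclp.has_sief2), reject
 * the unsupported nested+hugepage combination, filter the host facility
 * list through nonhyp_mask(), and hand over to the generic kvm_init(),
 * which registers the /dev/kvm misc device.
 */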
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");