// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

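/*
 * Like runtime instrumentation above, guarded storage is enabled
 * lazily: the control blocks and ECB/ECD bits are only set up when the
 * guest first uses the facility, and the intercepted instruction is
 * then retried.
 */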
static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };
	int rc;
	u8 ar;
	u64 op2;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

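/*
 * SET PREFIX relocates the 8KB prefix area of the vcpu, which is why
 * the new address is masked to an 8K boundary below before it is
 * validated and installed.
 */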
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_skey_related_inst(vcpu);
	/* Already enabled? */
	if (vcpu->arch.skey_enabled)
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (rc)
		return rc;

	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
	if (!vcpu->kvm->arch.use_skf)
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	else
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	vcpu->arch.skey_enabled = true;
	return 0;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (vcpu->kvm->arch.use_skf) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}

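/*
 * The ISKE, RRBE and SSKE handlers below share the same pattern: look
 * up the host mapping for the guest absolute address and, if the
 * storage key operation faults, resolve the fault with
 * fixup_user_fault() and retry under mmap_sem.
 */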
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long gaddr, vmaddr;
	unsigned char key;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, vmaddr, &key);

	if (rc) {
		rc = fixup_user_fault(current, current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			up_read(&current->mm->mmap_sem);
			goto retry;
		}
	}
	up_read(&current->mm->mmap_sem);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr, gaddr;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, vmaddr);
	if (rc < 0) {
		rc = fixup_user_fault(current, current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			up_read(&current->mm->mmap_sem);
			goto retry;
		}
	}
	up_read(&current->mm->mmap_sem);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8000
#define SSKE_MR 0x4000
#define SSKE_MC 0x2000
#define SSKE_MB 0x1000
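/*
 * With the MB (multiple-block) bit set, SSKE processes all pages up to
 * the next 1MB segment boundary in one go; otherwise only the single
 * page designated by reg2 is processed.
 */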
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		unlocked = false;

		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);

		if (rc < 0) {
			rc = fixup_user_fault(current, current->mm, vmaddr,
					      FAULT_FLAG_WRITE, &unlocked);
			rc = !rc ? -EAGAIN : rc;
		}
		up_read(&current->mm->mmap_sem);
		if (rc == -EFAULT)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (rc == -EAGAIN)
			continue;
		if (rc < 0)
			return rc;
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

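/*
 * TEST PENDING INTERRUPTION: dequeue a pending I/O interrupt and store
 * its interruption code either at the operand address (two words) or,
 * for an operand address of zero, in the lowcore (three words).
 */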
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

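/*
 * With in-kernel css support, only TPI (ipa 0xb236) and the interrupt
 * related part of TSCH (ipa 0xb235) are handled here; every other I/O
 * instruction is forwarded to userspace.
 */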
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

/*
 * handle_pqap: Handling pqap interception
 * @vcpu: the vcpu having issued the pqap instruction
 *
 * We now support PQAP/AQIC instructions and we need to correctly
 * answer the guest even if no dedicated driver's hook is available.
 *
 * The intercepting code calls a dedicated callback for this instruction
 * if a driver did register one in the CRYPTO satellite of the
 * SIE block.
 *
 * If no callback is available, the queues are not available: return this
 * response code to the caller and set CC to 3.
 * Else return the response code returned by the callback.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	struct ap_queue_status status = {};
	unsigned long reg0;
	int ret;
	uint8_t fc;

	/* Verify that the AP instructions are available */
	if (!ap_instructions_available())
		return -EOPNOTSUPP;
	/* Verify that the guest is allowed to use AP instructions */
	if (!(vcpu->arch.sie_block->eca & ECA_APIE))
		return -EOPNOTSUPP;
	/*
	 * The only possibly intercepted functions when AP instructions are
	 * available for the guest are AQIC and TAPQ with the t bit set
	 * since we do not set IC.3 (FIII) we currently will only intercept
	 * the AQIC function code.
	 * Note: running nested under z/VM can result in intercepts for other
	 * function codes, e.g. PQAP(QCI). We do not support this and bail out.
	 */
	reg0 = vcpu->run->s.regs.gprs[0];
	fc = (reg0 >> 24) & 0xff;
	if (fc != 0x03)
		return -EOPNOTSUPP;

	/* PQAP instruction is allowed for guest kernel only */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Common PQAP instruction specification exceptions */
	/* bits 41-47 must all be zeros */
	if (reg0 & 0x007f0000UL)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APFT not installed and T bit set */
	if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APXA not installed and APID greater than 64 or APQI greater than 16 */
	if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* AQIC function code specific exception */
	/* facility 65 not present for AQIC function code */
	if (!test_kvm_facility(vcpu->kvm, 65))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/*
	 * Verify that the hook callback is registered, lock the owner
	 * and call the hook.
	 */
	if (vcpu->kvm->arch.crypto.pqap_hook) {
		if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner))
			return -EOPNOTSUPP;
		ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu);
		module_put(vcpu->kvm->arch.crypto.pqap_hook->owner);
		if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
			kvm_s390_set_psw_cc(vcpu, 3);
		return ret;
	}
	/*
	 * A vfio_driver must register a hook.
	 * No hook means no driver to enable the SIE CRYCB and no queues.
	 * We send this response to the guest.
	 */
	status.response_code = 0x01;
	memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

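/*
 * A PSW is rejected below if it sets unassigned mask bits, designates
 * the unused combination of EA=1/BA=0, has an address that exceeds the
 * selected addressing mode, or has an odd instruction address.
 */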
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

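/*
 * For STSI 3.2.2 the data from the underlying hypervisor is kept, and
 * KVM inserts itself as an additional level-3 hypervisor entry at
 * index 0 of the VM description array.
 */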
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xaf:
		return handle_pqap(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

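/*
 * Layout of the PFMF first-operand register (reg1): the FSC field
 * selects the frame size, CF/SK request clearing and key setting, and
 * NQ/MR/MC/KEY qualify the storage-key operation. Reserved bits must
 * be zero.
 */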
#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
			if (rc < 0) {
				rc = fixup_user_fault(current, current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			up_read(&current->mm->mmap_sem);
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			if (rc == -EAGAIN)
				continue;
			if (rc < 0)
				return rc;
		}
		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

		/* Increment only if we are really flipping the bit */
		if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
	}

	return nappended;
}

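/*
 * Note: ESSA is the CMMA primitive. In migration mode each request is
 * processed individually via __do_essa() so that dirty-page state can
 * be tracked; otherwise CMMA interpretation is (re-)enabled in the SIE
 * block and the instruction is simply retried.
 */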
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (!vcpu->kvm->arch.migration_mode) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.uses_cmm = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		int srcu_idx;

		down_read(&vcpu->kvm->mm->mmap_sem);
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		i = __do_essa(vcpu, orc);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		up_read(&vcpu->kvm->mm->mmap_sem);
		if (i < 0)
			return i;
		/* Account for the possible extra cbrl entry */
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

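/*
 * LCTL/STCTL and LCTLG/STCTG operate on the register range reg1..reg3;
 * the loops below wrap around modulo 16 when reg3 is smaller than
 * reg1, as the architecture requires.
 */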
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

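/*
 * TPROT: only the access-key-0 case used by Linux memory detection is
 * handled here. CC0 means fetch and store are permitted, CC1 fetch
 * only, CC3 translation not available; everything else goes to
 * userspace.
 */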
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}