/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

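/*
 * Handle RI (runtime instrumentation) intercepts: if the guest is
 * allowed to use facility 64, lazily enable interpretive execution
 * of RI in the SIE control block and retry the instruction;
 * otherwise inject an operation exception.
 */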
static int handle_ri(struct kvm_vcpu *vcpu)
{
        if (test_kvm_facility(vcpu->kvm, 64)) {
                vcpu->arch.sie_block->ecb3 |= 0x01;
                kvm_s390_retry_instr(vcpu);
                return 0;
        } else
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

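/*
 * Intercepts for the 0xaa opcode: only the RI instructions
 * (sub-opcodes 0 through 4) are handled here; everything else goes
 * to userspace.
 */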
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
                return handle_ri(vcpu);
        else
                return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
        int rc;
        ar_t ar;
        u64 op2, val;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
        kvm_s390_set_tod_clock(vcpu->kvm, val);

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
}

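/* Handle SPX (SET PREFIX) interception */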
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        address &= 0x7fffe000u;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        if (kvm_is_error_gpa(vcpu->kvm, address))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}

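/* Handle STPX (STORE PREFIX) interception */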
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        address = kvm_s390_get_prefix(vcpu);

        /* get the value */
        rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}

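/* Handle STAP (STORE CPU ADDRESS) interception */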
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u16 vcpu_id = vcpu->vcpu_id;
        u64 ga;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (ga & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
        trace_kvm_s390_handle_stap(vcpu, ga);
        return 0;
}

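/*
 * Lazily convert the guest to real storage-key operation: once
 * s390_enable_skey() succeeds, drop the ISKE/SSKE/RRBE intercept
 * controls so the key instructions no longer trap.
 */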
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        trace_kvm_s390_skey_related_inst(vcpu);
        if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
                return rc;

        rc = s390_enable_skey();
        VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
        if (!rc)
                vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
        return rc;
}

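/*
 * Common path for the storage-key instructions: account the
 * instruction, make sure key handling is enabled, and, if the
 * storage-key facility is available, let SIE interpret the
 * instruction on retry (-EAGAIN tells the caller a retry is set up).
 */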
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
        int rc;

        vcpu->stat.instruction_storage_key++;
        rc = __skey_check_enable(vcpu);
        if (rc)
                return rc;
        if (sclp.has_skey) {
                /* with storage-key facility, SIE interprets it for us */
                kvm_s390_retry_instr(vcpu);
                VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
                return -EAGAIN;
        }
        return 0;
}

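/* Handle ISKE (INSERT STORAGE KEY EXTENDED) interception */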
static int handle_iske(struct kvm_vcpu *vcpu)
{
        unsigned long addr;
        unsigned char key;
        int reg1, reg2;
        int rc;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        addr = kvm_s390_real_to_abs(vcpu, addr);
        addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
        if (kvm_is_error_hva(addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        down_read(&current->mm->mmap_sem);
        rc = get_guest_storage_key(current->mm, addr, &key);
        up_read(&current->mm->mmap_sem);
        if (rc)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        vcpu->run->s.regs.gprs[reg1] &= ~0xff;
        vcpu->run->s.regs.gprs[reg1] |= key;
        return 0;
}

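/* Handle RRBE (RESET REFERENCE BIT EXTENDED) interception */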
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
        unsigned long addr;
        int reg1, reg2;
        int rc;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        addr = kvm_s390_real_to_abs(vcpu, addr);
        addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
        if (kvm_is_error_hva(addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        down_read(&current->mm->mmap_sem);
        rc = reset_guest_reference_bit(current->mm, addr);
        up_read(&current->mm->mmap_sem);
        if (rc < 0)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
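/* Handle SSKE (SET STORAGE KEY EXTENDED) interception */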
static int handle_sske(struct kvm_vcpu *vcpu)
{
        unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
        unsigned long start, end;
        unsigned char key, oldkey;
        int reg1, reg2;
        int rc;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        if (!test_kvm_facility(vcpu->kvm, 8))
                m3 &= ~SSKE_MB;
        if (!test_kvm_facility(vcpu->kvm, 10))
                m3 &= ~(SSKE_MC | SSKE_MR);
        if (!test_kvm_facility(vcpu->kvm, 14))
                m3 &= ~SSKE_NQ;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);
        if (m3 & SSKE_MB) {
                /* start already designates an absolute address */
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
        } else {
                start = kvm_s390_real_to_abs(vcpu, start);
                end = start + PAGE_SIZE;
        }

        while (start != end) {
                unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

                if (kvm_is_error_hva(addr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                down_read(&current->mm->mmap_sem);
                rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
                                                m3 & SSKE_NQ, m3 & SSKE_MR,
                                                m3 & SSKE_MC);
                up_read(&current->mm->mmap_sem);
                if (rc < 0)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                start += PAGE_SIZE;
        }

        if (m3 & (SSKE_MC | SSKE_MR)) {
                if (m3 & SSKE_MB) {
                        /* skey in reg1 is unpredictable */
                        kvm_s390_set_psw_cc(vcpu, 3);
                } else {
                        kvm_s390_set_psw_cc(vcpu, rc);
                        vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
                        vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
                }
        }
        if (m3 & SSKE_MB) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
                        vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
                else
                        vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
                end = kvm_s390_logical_to_effective(vcpu, end);
                vcpu->run->s.regs.gprs[reg2] |= end;
        }
        return 0;
}

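/*
 * Block until a concurrent IPTE interlock is released, then retry
 * the instruction that was intercepted.
 */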
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(vcpu->arch.sie_block->gpsw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
        kvm_s390_retry_instr(vcpu);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
}

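/* Handle TB (TEST BLOCK) interception */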
static int handle_test_block(struct kvm_vcpu *vcpu)
{
        gpa_t addr;
        int reg2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
                return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        addr = kvm_s390_real_to_abs(vcpu, addr);

        if (kvm_is_error_gpa(vcpu->kvm, addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        /*
         * We don't expect errors on modern systems, and do not care
         * about storage keys (yet), so let's just clear the page.
         */
        if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
                return -EFAULT;
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
}

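/* Handle TPI (TEST PENDING INTERRUPTION) interception */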
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        unsigned long len;
        u32 tpi_data[3];
        int rc;
        u64 addr;
        ar_t ar;

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        if (!inti) {
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
        tpi_data[1] = inti->io.io_int_parm;
        tpi_data[2] = inti->io.io_int_word;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                len = sizeof(tpi_data) - 4;
                rc = write_guest(vcpu, addr, ar, &tpi_data, len);
                if (rc) {
                        rc = kvm_s390_inject_prog_cond(vcpu, rc);
                        goto reinject_interrupt;
                }
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                len = sizeof(tpi_data);
                if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
                        /* failed writes to the low core are not recoverable */
                        rc = -EFAULT;
                        goto reinject_interrupt;
                }
        }

        /* irq was successfully handed to the guest */
        kfree(inti);
        kvm_s390_set_psw_cc(vcpu, 1);
        return 0;
reinject_interrupt:
        /*
         * If we encounter a problem storing the interruption code, the
         * instruction is suppressed from the guest's view: reinject the
         * interrupt.
         */
        if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
                kfree(inti);
                rc = -EFAULT;
        }
        /* don't set the cc, a pgm irq was injected or we drop to user space */
        return rc ? -EFAULT : 0;
}

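/* Handle TSCH (TEST SUBCHANNEL) interception */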
static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        const u64 isc_mask = 0xffUL << 24; /* all iscs set */

        /* a valid schid has at least one bit set */
        if (vcpu->run->s.regs.gprs[1])
                inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
                                           vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

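/*
 * Dispatch the channel-I/O instructions: with in-kernel css support,
 * TPI and the interrupt part of TSCH are handled here and everything
 * else goes to userspace; without it, the guest simply sees cc 3.
 */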
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}

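/* Handle STFL (STORE FACILITY LIST) interception */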
static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;
        unsigned int fac;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        /*
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
        fac = *vcpu->kvm->arch.model.fac_list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
                return rc;
        VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
        trace_kvm_s390_handle_stfl(vcpu, fac);
        return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        if (psw->addr & 1)
                return 0;
        return 1;
}

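/* Handle LPSW (LOAD PSW) interception */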
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;
        int rc;
        ar_t ar;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

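/* Handle LPSWE (LOAD PSW EXTENDED) interception */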
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;
        int rc;
        ar_t ar;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

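/* Handle STIDP (STORE CPU ID) interception */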
static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 stidp_data = vcpu->kvm->arch.model.cpuid;
        u64 operand2;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
        return 0;
}

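/*
 * Fill in the function-code 3 part of the STSI 3.2.2 block: shift any
 * data reported by a higher-level hypervisor down one slot and insert
 * this KVM as the lowest-level VM entry.
 */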
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        int cpus = 0;
        int n;

        cpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
                                 u8 fc, u8 sel1, u16 sel2)
{
        vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
        vcpu->run->s390_stsi.addr = addr;
        vcpu->run->s390_stsi.ar = ar;
        vcpu->run->s390_stsi.fc = fc;
        vcpu->run->s390_stsi.sel1 = sel1;
        vcpu->run->s390_stsi.sel2 = sel2;
}

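/* Handle STSI (STORE SYSTEM INFORMATION) interception */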
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;
        ar_t ar;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
        if (rc) {
                rc = kvm_s390_inject_prog_cond(vcpu, rc);
                goto out;
        }
        if (vcpu->kvm->arch.user_stsi) {
                insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
                rc = -EREMOTE;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return rc;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out:
        free_page(mem);
        return rc;
}

static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x04] = handle_set_clock,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x14] = kvm_s390_handle_vsie,
        [0x21] = handle_ipte_interlock,
        [0x29] = handle_iske,
        [0x2a] = handle_rrbe,
        [0x2b] = handle_sske,
        [0x2c] = handle_test_block,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x50] = handle_ipte_interlock,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. Here we check for
         * the privileged ones that we can handle in the kernel.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

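/* Handle EPSW (EXTRACT PSW) interception */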
static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

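/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */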
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        bool mr = false, mc = false, nq;
        int reg1, reg2;
        unsigned long start, end;
        unsigned char key;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!test_kvm_facility(vcpu->kvm, 8))
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if enabled for the guest */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
            !test_kvm_facility(vcpu->kvm, 14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide conditional-SSKE support if enabled for the guest */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
            test_kvm_facility(vcpu->kvm, 10)) {
                mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
                mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
        }

        nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
        key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                if (kvm_s390_check_low_addr_prot_real(vcpu, start))
                        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        }

        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                /* only 4k frames specify a real address */
                start = kvm_s390_real_to_abs(vcpu, start);
                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
                break;
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
        case 0x00002000:
                /*
                 * Only support 2G frame size if EDAT2 is available and we are
                 * not in 24-bit addressing mode.
                 */
                if (!test_kvm_facility(vcpu->kvm, 78) ||
                    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
                break;
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }

        while (start != end) {
                unsigned long useraddr;

                /* Translate guest address to host address */
                useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
                if (kvm_is_error_hva(useraddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        int rc = __skey_check_enable(vcpu);

                        if (rc)
                                return rc;
                        down_read(&current->mm->mmap_sem);
                        rc = cond_set_guest_storage_key(current->mm, useraddr,
                                                        key, NULL, nq, mr, mc);
                        up_read(&current->mm->mmap_sem);
                        if (rc < 0)
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
                        vcpu->run->s.regs.gprs[reg2] = end;
                } else {
                        vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
                        end = kvm_s390_logical_to_effective(vcpu, end);
                        vcpu->run->s.regs.gprs[reg2] |= end;
                }
        }
        return 0;
}

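/*
 * Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception:
 * retry the instruction under SIE interpretation and zap the guest
 * pages recorded in the CBRL area, discarding CMMA-released frames.
 */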
static int handle_essa(struct kvm_vcpu *vcpu)
{
        /* entries expected to be 1FF */
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        unsigned long *cbrlo;
        struct gmap *gmap;
        int i;

        VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
        gmap = vcpu->arch.gmap;
        vcpu->stat.instruction_essa++;
        if (!vcpu->kvm->arch.use_cmma)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Retry the ESSA instruction */
        kvm_s390_retry_instr(vcpu);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
        for (i = 0; i < entries; ++i)
                __gmap_zap(gmap, cbrlo[i]);
        up_read(&gmap->mm->mmap_sem);
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8a] = handle_ipte_interlock,
        [0x8d] = handle_epsw,
        [0x8e] = handle_ipte_interlock,
        [0x8f] = handle_ipte_interlock,
        [0xab] = handle_essa,
        [0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

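/* Handle LCTL (LOAD CONTROL) interception */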
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

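/* Handle STCTL (STORE CONTROL) interception */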
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_stctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

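/* Handle LCTLG (LOAD CONTROL, 64-bit) interception */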
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

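/* Handle STCTG (STORE CONTROL, 64-bit) interception */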
static int handle_stctg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_stctg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
        [0x25] = handle_stctg,
        [0x60] = handle_ri,
        [0x61] = handle_ri,
        [0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

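/* Handle TPROT (TEST PROTECTION) interception */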
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        unsigned long hva, gpa;
        int ret = 0, cc = 0;
        bool writable;
        ar_t ar;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

        /*
         * We only handle the Linux memory detection case (access key == 0);
         * everything else goes to userspace.
         */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_lock(vcpu);
        ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
        if (ret == PGM_PROTECTION) {
                /* Write protected? Try again with read-only... */
                cc = 1;
                ret = guest_translate_address(vcpu, address1, ar, &gpa,
                                              GACC_FETCH);
        }
        if (ret) {
                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
                        ret = kvm_s390_inject_program_int(vcpu, ret);
                } else if (ret > 0) {
                        /* Translation not available */
                        kvm_s390_set_psw_cc(vcpu, 3);
                        ret = 0;
                }
                goto out_unlock;
        }

        hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
        if (kvm_is_error_hva(hva)) {
                ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                if (!writable)
                        cc = 1;         /* Write not permitted ==> read-only */
                kvm_s390_set_psw_cc(vcpu, cc);
                /* Note: CC2 only occurs for storage keys (not supported yet) */
        }
out_unlock:
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_unlock(vcpu);
        return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

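/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */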
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

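/* Handle PTFF (PERFORM TIMING FACILITY FUNCTION) interception */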
static int handle_ptff(struct kvm_vcpu *vcpu)
{
        /* we don't emulate any control instructions yet */
        kvm_s390_set_psw_cc(vcpu, 3);
        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x04] = handle_ptff,
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}