GNU Linux-libre 5.10.217-gnu1
arch/s390/kvm/interrupt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/nospec.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/airq.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

static struct kvm_s390_gib *gib;

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
        int c, scn;

        if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
                return 0;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (src_id)
                *src_id = scn;

        return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
        int expect, rc;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl new_val = {0}, old_val;

                old_val = READ_ONCE(*sigp_ctrl);
                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl new_val = {0}, old_val;

                old_val = READ_ONCE(*sigp_ctrl);
                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (rc != expect) {
                /* another external call is pending */
                return -EBUSY;
        }
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        return 0;
}
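/*
 * Note (illustrative): the cmpxchg above can only succeed while no
 * external call is recorded, because the expected old value is read
 * with its "c" (call pending) bit forced to zero. If another CPU has
 * already set c=1, the compare fails and -EBUSY tells the caller that
 * a prior external call has not been delivered yet.
 */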

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
        int rc, expect;

        if (!kvm_s390_use_sca_entries())
                return;
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl old;

                old = READ_ONCE(*sigp_ctrl);
                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl old;

                old = READ_ONCE(*sigp_ctrl);
                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
        WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        return psw_extint_disabled(vcpu) &&
               psw_ioint_disabled(vcpu) &&
               psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;

        if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
                if ((s64)ckc >= (s64)now)
                        return 0;
        } else if (ckc >= now) {
                return 0;
        }
        return ckc_interrupts_enabled(vcpu);
}
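/*
 * Example (illustrative): with the CR0 sign bit set, ckc and the TOD
 * clock compare as signed values, so a ckc of -1 (0xffff...) lies in
 * the past of any small positive "now" and the interrupt is pending;
 * with unsigned comparison the same ckc would be far in the future.
 */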

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        return !psw_extint_disabled(vcpu) &&
               (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
        if (!cpu_timer_interrupts_enabled(vcpu))
                return 0;
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
}
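/*
 * Note: the CPU timer counts down and is interpreted as a signed
 * quantity; shifting the raw 64-bit value right by 63 extracts the
 * sign bit, so cpu_timer_irq_pending() returns 1 exactly when the
 * timer has gone negative, i.e. has already expired.
 */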

static uint64_t isc_to_isc_bits(int isc)
{
        return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
        return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
        return (int_word & 0x38000000) >> 27;
}
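/*
 * Worked example (illustrative): for ISC 3, isc_to_isc_bits(3) is
 * (0x80 >> 3) << 24 == 0x10000000, the matching CR6 mask bit, and
 * isc_to_int_word(3) is (3 << 27) | 0x80000000 == 0x98000000, an
 * adapter interruption word with the ISC in bits 2-4 (counting from
 * the leftmost bit). int_word_to_isc(0x98000000) recovers 3 by
 * masking with 0x38000000 and shifting right by 27.
 */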

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fix up the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
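/*
 * Example (illustrative): the helpers below imply that ipm sits right
 * after the u32 next_alert, i.e. at byte offset 4, which would make
 * IPM_BIT_OFFSET 32: the bit for GISC 0 is then bit 32 of an MSB-first
 * bitmap that starts at the GISA itself.
 */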

/**
 * gisa_set_iam - change the GISA interruption alert mask
 *
 * @gisa: gisa to operate on
 * @iam: new IAM value to use
 *
 * Change the IAM atomically with the next alert address and the IPM
 * of the GISA if the GISA is not part of the GIB alert list. All three
 * fields are located in the first long word of the GISA.
 *
 * Returns: 0 on success
 *          -EBUSY in case the gisa is part of the alert list
 */
static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
{
        u64 word, _word;

        do {
                word = READ_ONCE(gisa->u64.word[0]);
                if ((u64)gisa != word >> 32)
                        return -EBUSY;
                _word = (word & ~0xffUL) | iam;
        } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);

        return 0;
}
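/*
 * Layout note (illustrative): in the first doubleword of the GISA the
 * high 32 bits hold next_alert, which points back at the GISA itself
 * while it is not queued on the GIB alert list, and the low byte holds
 * the IAM. Hence "word >> 32 != (u64)gisa" detects a queued GISA and
 * "(word & ~0xffUL) | iam" swaps in the new mask without disturbing
 * the other fields.
 */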

/**
 * gisa_clear_ipm - clear the GISA interruption pending mask
 *
 * @gisa: gisa to operate on
 *
 * Clear the IPM atomically with the next alert address and the IAM
 * of the GISA unconditionally. All three fields are located in the
 * first long word of the GISA.
 */
static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
{
        u64 word, _word;

        do {
                word = READ_ONCE(gisa->u64.word[0]);
                _word = word & ~(0xffUL << 24);
        } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
}

/**
 * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
 *
 * @gi: gisa interrupt struct to work on
 *
 * Atomically restores the interruption alert mask if none of the
 * relevant ISCs are pending and returns the IPM.
 *
 * Returns: the relevant pending ISCs
 */
static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
{
        u8 pending_mask, alert_mask;
        u64 word, _word;

        do {
                word = READ_ONCE(gi->origin->u64.word[0]);
                alert_mask = READ_ONCE(gi->alert.mask);
                pending_mask = (u8)(word >> 24) & alert_mask;
                if (pending_mask)
                        return pending_mask;
                _word = (word & ~0xffUL) | alert_mask;
        } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);

        return 0;
}
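/*
 * Usage sketch (illustrative): a vcpu about to go idle can use this
 * helper to either pick up pending work or re-arm alerting in one
 * atomic step:
 *
 *	if (gisa_get_ipm_or_restore_iam(gi))
 *		deliver the returned ISCs now;
 *	else
 *		sleep, a GIB alert will trigger a wakeup later;
 */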

static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
{
        return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
}

static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
        return READ_ONCE(gisa->ipm);
}

static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
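/*
 * Usage sketch (illustrative): consumers claim pending ISCs one at a
 * time with the test-and-clear helper, so each interrupt is delivered
 * exactly once even when setters run concurrently:
 *
 *	for (gisc = 0; gisc <= MAX_ISC; gisc++)
 *		if (gisa_tac_ipm_gisc(gisa, gisc))
 *			deliver an I/O interrupt for this ISC;
 */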

static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
        unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
                                vcpu->arch.local_int.pending_irqs;

        pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
        return pending;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        unsigned long pending_mask;

        pending_mask = pending_irqs_no_gisa(vcpu);
        if (gi->origin)
                pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
        return pending_mask;
}

static inline int isc_to_irq_type(unsigned long isc)
{
        return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
        return IRQ_PEND_IO_ISC_0 - irq_type;
}
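/*
 * Mapping note (illustrative): IRQ_PEND_IO_ISC_0 is the highest of the
 * eight I/O pending bits, so subtracting the ISC walks down to
 * IRQ_PEND_IO_ISC_7 and the inverse mapping is the same subtraction.
 * This is also why pending_irqs() can shift the GISA IPM by
 * IRQ_PEND_IO_ISC_7 to line it up with these bits.
 */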

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
                                   unsigned long active_mask)
{
        int i;

        for (i = 0; i <= MAX_ISC; i++)
                if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
                        active_mask &= ~(1UL << (isc_to_irq_type(i)));

        return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask;

        active_mask = pending_irqs(vcpu);
        if (!active_mask)
                return 0;

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (psw_ioint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_IO_MASK;
        else
                active_mask = disable_iscs(vcpu, active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
                __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
                __clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
        }
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;
        /* PV guest CPUs can have a single interruption injected at a time. */
        if (kvm_s390_pv_cpu_get_handle(vcpu) &&
            vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
                active_mask &= ~(IRQ_PEND_EXT_II_MASK |
                                 IRQ_PEND_IO_MASK |
                                 IRQ_PEND_MCHK_MASK);
        /*
         * Check both the floating and local interrupts' cr14 because
         * bit IRQ_PEND_MCHK_REP could be set in both cases.
         */
        if (!(vcpu->arch.sie_block->gcr[14] &
           (vcpu->kvm->arch.float_int.mchk.cr14 |
           vcpu->arch.local_int.irq.mchk.cr14)))
                __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
        set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
        clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
                                      CPUSTAT_STOP_INT);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
                return;
        if (psw_ioint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_io(vcpu);
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}
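/*
 * Summary (illustrative): each helper above arms an exit condition for
 * an interrupt class that is pending but not currently deliverable.
 * If the guest PSW masks the class, a CPUSTAT_* flag requests an
 * intercept once the class is enabled again; otherwise the LCTL_CR*
 * bit traps guest changes to the control register that still masks the
 * interrupt, so delivery can be retried at that point.
 */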

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc = 0;

        vcpu->stat.deliver_cputm++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
        } else {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                                   (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        }
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc = 0;

        vcpu->stat.deliver_ckc++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
        } else {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                                   (u16 __user *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        }
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
                   ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __write_machine_check(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_mchk_info *mchk)
{
        unsigned long ext_sa_addr;
        unsigned long lc;
        freg_t fprs[NUM_FPRS];
        union mci mci;
        int rc;

        /*
         * All other possible payload for a machine check (e.g. the register
         * contents in the save area) will be handled by the ultravisor, as
         * the hypervisor does not have the needed information for
         * protected guests.
         */
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
                vcpu->arch.sie_block->mcic = mchk->mcic;
                vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
                vcpu->arch.sie_block->edc = mchk->ext_damage_code;
                return 0;
        }

        mci.val = mchk->mcic;
        /* take care of lazy register loading */
        save_fpu_regs();
        save_access_regs(vcpu->run->s.regs.acrs);
        if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
                save_gs_cb(current->thread.gs_cb);

        /* Extended save area */
        rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
                           sizeof(unsigned long));
        /* Only bits 0 through 63-LC are used for address formation */
        lc = ext_sa_addr & MCESA_LC_MASK;
        if (test_kvm_facility(vcpu->kvm, 133)) {
                switch (lc) {
                case 0:
                case 10:
                        ext_sa_addr &= ~0x3ffUL;
                        break;
                case 11:
                        ext_sa_addr &= ~0x7ffUL;
                        break;
                case 12:
                        ext_sa_addr &= ~0xfffUL;
                        break;
                default:
                        ext_sa_addr = 0;
                        break;
                }
        } else {
                ext_sa_addr &= ~0x3ffUL;
        }

        if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
                if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
                                    512))
                        mci.vr = 0;
        } else {
                mci.vr = 0;
        }
        if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
            && (lc == 11 || lc == 12)) {
                if (write_guest_abs(vcpu, ext_sa_addr + 1024,
                                    &vcpu->run->s.regs.gscb, 32))
                        mci.gs = 0;
        } else {
                mci.gs = 0;
        }

        /* General interruption information */
        rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

        /* Register-save areas */
        if (MACHINE_HAS_VX) {
                convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
        } else {
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
                                     vcpu->run->s.regs.fprs, 128);
        }
        rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
                             vcpu->run->s.regs.gprs, 128);
        rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
                           (u32 __user *) __LC_FP_CREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
                           (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
                           (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
                           (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
        rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
                             &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
                             &vcpu->arch.sie_block->gcr, 128);

        /* Extended interruption information */
        rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
                           (u32 __user *) __LC_EXT_DAMAGE_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
                             sizeof(mchk->fixed_logout));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk = {};
        int deliver = 0;
        int rc = 0;

        spin_lock(&fi->lock);
        spin_lock(&li->lock);
        if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
            test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
                /*
                 * If there was an exigent machine check pending, then any
                 * repressible machine checks that might have been pending
                 * are indicated along with it, so always clear bits for
                 * repressible and exigent interrupts
                 */
                mchk = li->irq.mchk;
                clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
                clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
                memset(&li->irq.mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        /*
         * We indicate floating repressible conditions along with
         * other pending conditions. Channel Report Pending and Channel
         * Subsystem damage are the only two and are indicated by
         * bits in mcic and masked in cr14.
         */
        if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
                mchk.mcic |= fi->mchk.mcic;
                mchk.cr14 |= fi->mchk.cr14;
                memset(&fi->mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        spin_unlock(&li->lock);
        spin_unlock(&fi->lock);

        if (deliver) {
                VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
                           mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_MCHK,
                                                 mchk.cr14, mchk.mcic);
                vcpu->stat.deliver_machine_check++;
                rc = __write_machine_check(vcpu, &mchk);
        }
        return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc = 0;

        VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
        } else {
                rc  = write_guest_lc(vcpu,
                                     offsetof(struct lowcore, restart_old_psw),
                                     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        }
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
                vcpu->arch.sie_block->extcpuaddr = cpu_addr;
                return 0;
        }

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
                vcpu->arch.sie_block->extcpuaddr = extcall.code;
                return 0;
        }

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
{
        switch (code) {
        case PGM_SPECIFICATION:
                vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
                break;
        case PGM_OPERAND:
                vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilen;

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
        VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
                   pgm_info.code, ilen);
        vcpu->stat.deliver_program++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        /* PER is handled by the ultravisor */
        if (kvm_s390_pv_cpu_is_protected(vcpu))
                return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                fallthrough;
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
                kvm_s390_rewind_psw(vcpu, ilen);

        /* bits 1+2 of the target are the ilc, so we can directly use ilen */
        rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                                 (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
{
        int rc;

        if (kvm_s390_pv_cpu_get_handle(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
                vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
                vcpu->arch.sie_block->eiparams = parm;
                return 0;
        }

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, parm,
                           (u32 *)__LC_EXT_PARAMS);

        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;

        spin_lock(&fi->lock);
        if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
            !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        memset(&fi->srv_signal, 0, sizeof(ext));
        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
        clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
        if (kvm_s390_pv_cpu_is_protected(vcpu))
                set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
                   ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        return write_sclp(vcpu, ext.ext_params);
}

static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;

        spin_lock(&fi->lock);
        if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        /* only clear the event bit */
        fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
        clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        return write_sclp(vcpu, SCCB_EVENT_PENDING);
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_PFAULT] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
                clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_INT_PFAULT_DONE, 0,
                                                 inti->ext.ext_params2);
                VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
                           inti->ext.ext_params2);

                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, PFAULT_DONE,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4,
                           "deliver: virtio parm: 0x%x,parm64: 0x%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                inti->ext.ext_params,
                                inti->ext.ext_params2);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
                clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                                (u32 *)__LC_EXT_PARAMS);
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
        int rc;

        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
                vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
                vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
                vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
                vcpu->arch.sie_block->io_int_word = io->io_int_word;
                return 0;
        }

        rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw,
                             sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     unsigned long irq_type)
{
        struct list_head *isc_list;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        struct kvm_s390_interrupt_info *inti = NULL;
        struct kvm_s390_io_info io;
        u32 isc;
        int rc = 0;

        fi = &vcpu->kvm->arch.float_int;

        spin_lock(&fi->lock);
        isc = irq_type_to_isc(irq_type);
        isc_list = &fi->lists[isc];
        inti = list_first_entry_or_null(isc_list,
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                if (inti->type & KVM_S390_INT_IO_AI_MASK)
                        VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
                else
                        VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
                        inti->io.subchannel_id >> 8,
                        inti->io.subchannel_id >> 1 & 0x3,
                        inti->io.subchannel_nr);

                vcpu->stat.deliver_io++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                ((__u32)inti->io.subchannel_id << 16) |
                                inti->io.subchannel_nr,
                                ((__u64)inti->io.io_int_parm << 32) |
                                inti->io.io_int_word);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
        }
        if (list_empty(isc_list))
                clear_bit(irq_type, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc = __do_deliver_io(vcpu, &(inti->io));
                kfree(inti);
                goto out;
        }

        if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
                /*
                 * If an adapter interrupt was not delivered while in SIE
                 * context, KVM handles the delivery here.
                 */
                VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
                memset(&io, 0, sizeof(io));
                io.io_int_word = isc_to_int_word(isc);
                vcpu->stat.deliver_io++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                        KVM_S390_INT_IO(1, 0, 0, 0),
                        ((__u32)io.subchannel_id << 16) |
                        io.subchannel_nr,
                        ((__u64)io.io_int_parm << 32) |
                        io.io_int_word);
                rc = __do_deliver_io(vcpu, &io);
        }
out:
        return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        if (!sclp.has_sigpif)
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        if (deliverable_irqs(vcpu))
                return 1;

        if (kvm_cpu_has_pending_timer(vcpu))
                return 1;

        /* external call pending and deliverable */
        if (kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
                return 1;

        if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;
        u64 cputm, sltime = 0;

        if (ckc_interrupts_enabled(vcpu)) {
                if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
                        if ((s64)now < (s64)ckc)
                                sltime = tod_to_ns((s64)ckc - (s64)now);
                } else if (now < ckc) {
                        sltime = tod_to_ns(ckc - now);
                }
                /* already expired */
                if (!sltime)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
                        /* already expired? */
                        if (cputm >> 63)
                                return 0;
                        return min(sltime, tod_to_ns(cputm));
                }
        } else if (cpu_timer_interrupts_enabled(vcpu)) {
                sltime = kvm_s390_get_cpu_timer(vcpu);
                /* already expired? */
                if (sltime >> 63)
                        return 0;
        }
        return sltime;
}
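/*
 * Worked example (illustrative, assuming the architected 4096 TOD
 * units per microsecond): if the clock comparator lies 4096 TOD units
 * ahead of "now", sltime becomes tod_to_ns(4096) == 1000 ns. If the
 * CPU timer is enabled as well and would expire earlier, the min()
 * above makes the shorter interval win, so the vcpu never oversleeps.
 */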

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        u64 sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (gi->origin &&
            (gisa_get_ipm_or_restore_iam(gi) &
             vcpu->arch.sie_block->gcr[6] >> 24))
                return 0;

        if (!ckc_interrupts_enabled(vcpu) &&
            !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        sltime = __calculate_sltime(vcpu);
        if (!sltime)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        vcpu->valid_wakeup = true;
        kvm_vcpu_wake_up(vcpu);

        /*
         * The VCPU might not be sleeping but rather executing VSIE. Let's
         * kick it, so it leaves the SIE to process the request.
         */
        kvm_s390_vsie_kick(vcpu);
}
1361
1362 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
1363 {
1364         struct kvm_vcpu *vcpu;
1365         u64 sltime;
1366
1367         vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
1368         sltime = __calculate_sltime(vcpu);
1369
1370         /*
1371          * If the monotonic clock runs faster than the tod clock we might be
1372          * woken up too early and have to go back to sleep to avoid deadlocks.
1373          */
1374         if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
1375                 return HRTIMER_RESTART;
1376         kvm_s390_vcpu_wakeup(vcpu);
1377         return HRTIMER_NORESTART;
1378 }
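
/*
 * Editorial sketch, not part of the original file: roughly how the ckc
 * hrtimer used above is wired up when a vCPU is created (the real setup
 * lives in kvm-s390.c; example_setup_ckc_timer is a hypothetical name).
 * The timer runs on CLOCK_MONOTONIC, which is why kvm_s390_idle_wakeup()
 * re-checks against the tod clock and may re-arm itself.
 */
static void __maybe_unused example_setup_ckc_timer(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
}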
1379
1380 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
1381 {
1382         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1383
1384         spin_lock(&li->lock);
1385         li->pending_irqs = 0;
1386         bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
1387         memset(&li->irq, 0, sizeof(li->irq));
1388         spin_unlock(&li->lock);
1389
1390         sca_clear_ext_call(vcpu);
1391 }
1392
1393 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
1394 {
1395         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1396         int rc = 0;
1397         unsigned long irq_type;
1398         unsigned long irqs;
1399
1400         __reset_intercept_indicators(vcpu);
1401
1402         /* pending ckc conditions might have been invalidated */
1403         clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1404         if (ckc_irq_pending(vcpu))
1405                 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1406
1407         /* pending cpu timer conditions might have been invalidated */
1408         clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1409         if (cpu_timer_irq_pending(vcpu))
1410                 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1411
1412         while ((irqs = deliverable_irqs(vcpu)) && !rc) {
1413                 /* bits are in the reverse order of interrupt priority */
1414                 irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
1415                 switch (irq_type) {
1416                 case IRQ_PEND_IO_ISC_0:
1417                 case IRQ_PEND_IO_ISC_1:
1418                 case IRQ_PEND_IO_ISC_2:
1419                 case IRQ_PEND_IO_ISC_3:
1420                 case IRQ_PEND_IO_ISC_4:
1421                 case IRQ_PEND_IO_ISC_5:
1422                 case IRQ_PEND_IO_ISC_6:
1423                 case IRQ_PEND_IO_ISC_7:
1424                         rc = __deliver_io(vcpu, irq_type);
1425                         break;
1426                 case IRQ_PEND_MCHK_EX:
1427                 case IRQ_PEND_MCHK_REP:
1428                         rc = __deliver_machine_check(vcpu);
1429                         break;
1430                 case IRQ_PEND_PROG:
1431                         rc = __deliver_prog(vcpu);
1432                         break;
1433                 case IRQ_PEND_EXT_EMERGENCY:
1434                         rc = __deliver_emergency_signal(vcpu);
1435                         break;
1436                 case IRQ_PEND_EXT_EXTERNAL:
1437                         rc = __deliver_external_call(vcpu);
1438                         break;
1439                 case IRQ_PEND_EXT_CLOCK_COMP:
1440                         rc = __deliver_ckc(vcpu);
1441                         break;
1442                 case IRQ_PEND_EXT_CPU_TIMER:
1443                         rc = __deliver_cpu_timer(vcpu);
1444                         break;
1445                 case IRQ_PEND_RESTART:
1446                         rc = __deliver_restart(vcpu);
1447                         break;
1448                 case IRQ_PEND_SET_PREFIX:
1449                         rc = __deliver_set_prefix(vcpu);
1450                         break;
1451                 case IRQ_PEND_PFAULT_INIT:
1452                         rc = __deliver_pfault_init(vcpu);
1453                         break;
1454                 case IRQ_PEND_EXT_SERVICE:
1455                         rc = __deliver_service(vcpu);
1456                         break;
1457                 case IRQ_PEND_EXT_SERVICE_EV:
1458                         rc = __deliver_service_ev(vcpu);
1459                         break;
1460                 case IRQ_PEND_PFAULT_DONE:
1461                         rc = __deliver_pfault_done(vcpu);
1462                         break;
1463                 case IRQ_PEND_VIRTIO:
1464                         rc = __deliver_virtio(vcpu);
1465                         break;
1466                 default:
1467                         WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
1468                         clear_bit(irq_type, &li->pending_irqs);
1469                 }
1470         }
1471
1472         set_intercept_indicators(vcpu);
1473
1474         return rc;
1475 }
1476
1477 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1478 {
1479         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1480
1481         vcpu->stat.inject_program++;
1482         VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1483         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1484                                    irq->u.pgm.code, 0);
1485
1486         if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1487                 /* auto-detect the instruction length if no valid ILC was given */
1488                 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1489                 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1490                 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1491         }
1492
1493         if (irq->u.pgm.code == PGM_PER) {
1494                 li->irq.pgm.code |= PGM_PER;
1495                 li->irq.pgm.flags = irq->u.pgm.flags;
1496                 /* only modify PER related information */
1497                 li->irq.pgm.per_address = irq->u.pgm.per_address;
1498                 li->irq.pgm.per_code = irq->u.pgm.per_code;
1499                 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1500                 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1501         } else if (!(irq->u.pgm.code & PGM_PER)) {
1502                 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1503                                    irq->u.pgm.code;
1504                 li->irq.pgm.flags = irq->u.pgm.flags;
1505                 /* only modify non-PER information */
1506                 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1507                 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1508                 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1509                 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1510                 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1511                 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1512         } else {
1513                 li->irq.pgm = irq->u.pgm;
1514         }
1515         set_bit(IRQ_PEND_PROG, &li->pending_irqs);
1516         return 0;
1517 }
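
/*
 * Editorial sketch, not part of the original file: a minimal example of
 * queueing a program interrupt on a vCPU through the public helper.  The
 * function name is hypothetical; PGM_SPECIFICATION is one of the PGM_*
 * codes this file works with (cf. the PGM_PER handling above).  The pgm
 * flags are left zero, so the ILC is auto-detected as described above.
 */
static int __maybe_unused example_inject_specification_exc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_irq irq = {
                .type = KVM_S390_PROGRAM_INT,
                .u.pgm.code = PGM_SPECIFICATION,
        };

        /* takes li->lock, runs __inject_prog() and wakes the vCPU up */
        return kvm_s390_inject_vcpu(vcpu, &irq);
}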
1518
1519 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1520 {
1521         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1522
1523         vcpu->stat.inject_pfault_init++;
1524         VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1525                    irq->u.ext.ext_params2);
1526         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1527                                    irq->u.ext.ext_params,
1528                                    irq->u.ext.ext_params2);
1529
1530         li->irq.ext = irq->u.ext;
1531         set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
1532         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1533         return 0;
1534 }
1535
1536 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1537 {
1538         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1539         struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1540         uint16_t src_id = irq->u.extcall.code;
1541
1542         vcpu->stat.inject_external_call++;
1543         VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
1544                    src_id);
1545         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1546                                    src_id, 0);
1547
1548         /* fail if the sending vcpu does not exist */
1549         if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1550                 return -EINVAL;
1551
1552         if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
1553                 return sca_inject_ext_call(vcpu, src_id);
1554
1555         if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1556                 return -EBUSY;
1557         *extcall = irq->u.extcall;
1558         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1559         return 0;
1560 }
1561
1562 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1563 {
1564         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1565         struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1566
1567         vcpu->stat.inject_set_prefix++;
1568         VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
1569                    irq->u.prefix.address);
1570         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1571                                    irq->u.prefix.address, 0);
1572
1573         if (!is_vcpu_stopped(vcpu))
1574                 return -EBUSY;
1575
1576         *prefix = irq->u.prefix;
1577         set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
1578         return 0;
1579 }
1580
1581 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
1582 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1583 {
1584         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1585         struct kvm_s390_stop_info *stop = &li->irq.stop;
1586         int rc = 0;
1587
1588         vcpu->stat.inject_stop_signal++;
1589         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
1590
1591         if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1592                 return -EINVAL;
1593
1594         if (is_vcpu_stopped(vcpu)) {
1595                 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1596                         rc = kvm_s390_store_status_unloaded(vcpu,
1597                                                 KVM_S390_STORE_STATUS_NOADDR);
1598                 return rc;
1599         }
1600
1601         if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1602                 return -EBUSY;
1603         stop->flags = irq->u.stop.flags;
1604         kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
1605         return 0;
1606 }
1607
1608 static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
1609 {
1610         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1611
1612         vcpu->stat.inject_restart++;
1613         VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
1614         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
1615
1616         set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1617         return 0;
1618 }
1619
1620 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1621                                    struct kvm_s390_irq *irq)
1622 {
1623         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1624
1625         vcpu->stat.inject_emergency_signal++;
1626         VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
1627                    irq->u.emerg.code);
1628         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1629                                    irq->u.emerg.code, 0);
1630
1631         /* fail if the sending vcpu does not exist */
1632         if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1633                 return -EINVAL;
1634
1635         set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1636         set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1637         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1638         return 0;
1639 }
1640
1641 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1642 {
1643         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1644         struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1645
1646         vcpu->stat.inject_mchk++;
1647         VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1648                    irq->u.mchk.mcic);
1649         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1650                                    irq->u.mchk.mcic);
1651
1652         /*
1653          * Because repressible machine checks can be indicated along with
1654          * exigent machine checks (PoP, Chapter 11, Interruption action),
1655          * we need to combine cr14, mcic and the external damage code.
1656          * The failing storage address and the logout area should not be
1657          * ORed together; we just indicate the last occurrence of the
1658          * corresponding machine check.
1659          */
1660         mchk->cr14 |= irq->u.mchk.cr14;
1661         mchk->mcic |= irq->u.mchk.mcic;
1662         mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1663         mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1664         memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1665                sizeof(mchk->fixed_logout));
1666         if (mchk->mcic & MCHK_EX_MASK)
1667                 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1668         else if (mchk->mcic & MCHK_REP_MASK)
1669                 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
1670         return 0;
1671 }
1672
1673 static int __inject_ckc(struct kvm_vcpu *vcpu)
1674 {
1675         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1676
1677         vcpu->stat.inject_ckc++;
1678         VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1679         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1680                                    0, 0);
1681
1682         set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1683         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1684         return 0;
1685 }
1686
1687 static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1688 {
1689         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1690
1691         vcpu->stat.inject_cputm++;
1692         VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1693         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1694                                    0, 0);
1695
1696         set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1697         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1698         return 0;
1699 }
1700
1701 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1702                                                   int isc, u32 schid)
1703 {
1704         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1705         struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1706         struct kvm_s390_interrupt_info *iter;
1707         u16 id = (schid & 0xffff0000U) >> 16;
1708         u16 nr = schid & 0x0000ffffU;
1709
1710         spin_lock(&fi->lock);
1711         list_for_each_entry(iter, isc_list, list) {
1712                 if (schid && (id != iter->io.subchannel_id ||
1713                               nr != iter->io.subchannel_nr))
1714                         continue;
1715                 /* found an appropriate entry */
1716                 list_del_init(&iter->list);
1717                 fi->counters[FIRQ_CNTR_IO] -= 1;
1718                 if (list_empty(isc_list))
1719                         clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1720                 spin_unlock(&fi->lock);
1721                 return iter;
1722         }
1723         spin_unlock(&fi->lock);
1724         return NULL;
1725 }
1726
1727 static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
1728                                                       u64 isc_mask, u32 schid)
1729 {
1730         struct kvm_s390_interrupt_info *inti = NULL;
1731         int isc;
1732
1733         for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1734                 if (isc_mask & isc_to_isc_bits(isc))
1735                         inti = get_io_int(kvm, isc, schid);
1736         }
1737         return inti;
1738 }
1739
1740 static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1741 {
1742         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1743         unsigned long active_mask;
1744         int isc;
1745
1746         if (schid)
1747                 goto out;
1748         if (!gi->origin)
1749                 goto out;
1750
1751         active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
1752         while (active_mask) {
1753                 isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
1754                 if (gisa_tac_ipm_gisc(gi->origin, isc))
1755                         return isc;
1756                 clear_bit_inv(isc, &active_mask);
1757         }
1758 out:
1759         return -EINVAL;
1760 }
1761
1762 /*
1763  * Dequeue and return an I/O interrupt matching any of the interruption
1764  * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1765  * Take into account the interrupts pending in the interrupt list and in GISA.
1766  *
1767  * Note that for a guest that does not enable I/O interrupts
1768  * but relies on TPI, a flood of classic interrupts may starve
1769  * out adapter interrupts on the same isc. Linux does not do
1770  * that, and it is possible to work around the issue by configuring
1771  * different iscs for classic and adapter interrupts in the guest,
1772  * but we may want to revisit this in the future.
1773  */
1774 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1775                                                     u64 isc_mask, u32 schid)
1776 {
1777         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1778         struct kvm_s390_interrupt_info *inti, *tmp_inti;
1779         int isc;
1780
1781         inti = get_top_io_int(kvm, isc_mask, schid);
1782
1783         isc = get_top_gisa_isc(kvm, isc_mask, schid);
1784         if (isc < 0)
1785                 /* no AI in GISA */
1786                 goto out;
1787
1788         if (!inti)
1789                 /* AI in GISA but no classical IO int */
1790                 goto gisa_out;
1791
1792         /* both types of interrupts present */
1793         if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1794                 /* classical IO int with higher priority */
1795                 gisa_set_ipm_gisc(gi->origin, isc);
1796                 goto out;
1797         }
1798 gisa_out:
1799         tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1800         if (tmp_inti) {
1801                 tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1802                 tmp_inti->io.io_int_word = isc_to_int_word(isc);
1803                 if (inti)
1804                         kvm_s390_reinject_io_int(kvm, inti);
1805                 inti = tmp_inti;
1806         } else
1807                 gisa_set_ipm_gisc(gi->origin, isc);
1808 out:
1809         return inti;
1810 }
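
/*
 * Editorial sketch, not part of the original file: dequeueing the
 * highest-priority pending I/O interrupt for all iscs and any subchannel.
 * The 0xff << 24 isc mask mirrors the one clear_io_irq() uses below; the
 * caller owns the returned element and must kfree() or reinject it.
 */
static __maybe_unused struct kvm_s390_interrupt_info *
example_pop_any_io_int(struct kvm *kvm)
{
        const u64 isc_mask = 0xffUL << 24;      /* all iscs set */

        return kvm_s390_get_io_int(kvm, isc_mask, 0);
}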
1811
1812 static int __inject_service(struct kvm *kvm,
1813                              struct kvm_s390_interrupt_info *inti)
1814 {
1815         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1816
1817         kvm->stat.inject_service_signal++;
1818         spin_lock(&fi->lock);
1819         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1820
1821         /* We always allow events, track them separately from the sccb ints */
1822         if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
1823                 set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
1824
1825         /*
1826          * Early versions of the QEMU s390 BIOS will inject several
1827          * service interrupts one after another without handling the
1828          * busy condition code.
1829          * We silently ignore those superfluous sccb values.
1830          * A future version of QEMU will take care of serializing
1831          * servc requests.
1832          */
1833         if (fi->srv_signal.ext_params & SCCB_MASK)
1834                 goto out;
1835         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1836         set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1837 out:
1838         spin_unlock(&fi->lock);
1839         kfree(inti);
1840         return 0;
1841 }
1842
1843 static int __inject_virtio(struct kvm *kvm,
1844                             struct kvm_s390_interrupt_info *inti)
1845 {
1846         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1847
1848         kvm->stat.inject_virtio++;
1849         spin_lock(&fi->lock);
1850         if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1851                 spin_unlock(&fi->lock);
1852                 return -EBUSY;
1853         }
1854         fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1855         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1856         set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1857         spin_unlock(&fi->lock);
1858         return 0;
1859 }
1860
1861 static int __inject_pfault_done(struct kvm *kvm,
1862                                  struct kvm_s390_interrupt_info *inti)
1863 {
1864         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1865
1866         kvm->stat.inject_pfault_done++;
1867         spin_lock(&fi->lock);
1868         if (fi->counters[FIRQ_CNTR_PFAULT] >=
1869                 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1870                 spin_unlock(&fi->lock);
1871                 return -EBUSY;
1872         }
1873         fi->counters[FIRQ_CNTR_PFAULT] += 1;
1874         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1875         set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1876         spin_unlock(&fi->lock);
1877         return 0;
1878 }
1879
1880 #define CR_PENDING_SUBCLASS 28
1881 static int __inject_float_mchk(struct kvm *kvm,
1882                                 struct kvm_s390_interrupt_info *inti)
1883 {
1884         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1885
1886         kvm->stat.inject_float_mchk++;
1887         spin_lock(&fi->lock);
1888         fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1889         fi->mchk.mcic |= inti->mchk.mcic;
1890         set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1891         spin_unlock(&fi->lock);
1892         kfree(inti);
1893         return 0;
1894 }
1895
1896 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1897 {
1898         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1899         struct kvm_s390_float_interrupt *fi;
1900         struct list_head *list;
1901         int isc;
1902
1903         kvm->stat.inject_io++;
1904         isc = int_word_to_isc(inti->io.io_int_word);
1905
1906         /*
1907          * Do not make use of gisa in protected mode. We do not use the lock
1908          * checking variant as this is just a performance optimization and we
1909          * do not hold the lock here. This is ok as the code will pick
1910          * interrupts from both "lists" for delivery.
1911          */
1912         if (!kvm_s390_pv_get_handle(kvm) &&
1913             gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
1914                 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1915                 gisa_set_ipm_gisc(gi->origin, isc);
1916                 kfree(inti);
1917                 return 0;
1918         }
1919
1920         fi = &kvm->arch.float_int;
1921         spin_lock(&fi->lock);
1922         if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1923                 spin_unlock(&fi->lock);
1924                 return -EBUSY;
1925         }
1926         fi->counters[FIRQ_CNTR_IO] += 1;
1927
1928         if (inti->type & KVM_S390_INT_IO_AI_MASK)
1929                 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1930         else
1931                 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1932                         inti->io.subchannel_id >> 8,
1933                         inti->io.subchannel_id >> 1 & 0x3,
1934                         inti->io.subchannel_nr);
1935         list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1936         list_add_tail(&inti->list, list);
1937         set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1938         spin_unlock(&fi->lock);
1939         return 0;
1940 }
1941
1942 /*
1943  * Find a destination VCPU for a floating irq and kick it.
1944  */
1945 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1946 {
1947         struct kvm_vcpu *dst_vcpu;
1948         int sigcpu, online_vcpus, nr_tries = 0;
1949
1950         online_vcpus = atomic_read(&kvm->online_vcpus);
1951         if (!online_vcpus)
1952                 return;
1953
1954         /* find idle VCPUs first, then round robin */
1955         sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
1956         if (sigcpu == online_vcpus) {
1957                 do {
1958                         sigcpu = kvm->arch.float_int.next_rr_cpu++;
1959                         kvm->arch.float_int.next_rr_cpu %= online_vcpus;
1960                         /* avoid endless loops if all vcpus are stopped */
1961                         if (nr_tries++ >= online_vcpus)
1962                                 return;
1963                 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1964         }
1965         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1966
1967         /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1968         switch (type) {
1969         case KVM_S390_MCHK:
1970                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
1971                 break;
1972         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1973                 if (!(type & KVM_S390_INT_IO_AI_MASK &&
1974                       kvm->arch.gisa_int.origin) ||
1975                       kvm_s390_pv_cpu_get_handle(dst_vcpu))
1976                         kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
1977                 break;
1978         default:
1979                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
1980                 break;
1981         }
1982         kvm_s390_vcpu_wakeup(dst_vcpu);
1983 }
1984
1985 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1986 {
1987         u64 type = READ_ONCE(inti->type);
1988         int rc;
1989
1990         switch (type) {
1991         case KVM_S390_MCHK:
1992                 rc = __inject_float_mchk(kvm, inti);
1993                 break;
1994         case KVM_S390_INT_VIRTIO:
1995                 rc = __inject_virtio(kvm, inti);
1996                 break;
1997         case KVM_S390_INT_SERVICE:
1998                 rc = __inject_service(kvm, inti);
1999                 break;
2000         case KVM_S390_INT_PFAULT_DONE:
2001                 rc = __inject_pfault_done(kvm, inti);
2002                 break;
2003         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2004                 rc = __inject_io(kvm, inti);
2005                 break;
2006         default:
2007                 rc = -EINVAL;
2008         }
2009         if (rc)
2010                 return rc;
2011
2012         __floating_irq_kick(kvm, type);
2013         return 0;
2014 }
2015
2016 int kvm_s390_inject_vm(struct kvm *kvm,
2017                        struct kvm_s390_interrupt *s390int)
2018 {
2019         struct kvm_s390_interrupt_info *inti;
2020         int rc;
2021
2022         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2023         if (!inti)
2024                 return -ENOMEM;
2025
2026         inti->type = s390int->type;
2027         switch (inti->type) {
2028         case KVM_S390_INT_VIRTIO:
2029                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
2030                          s390int->parm, s390int->parm64);
2031                 inti->ext.ext_params = s390int->parm;
2032                 inti->ext.ext_params2 = s390int->parm64;
2033                 break;
2034         case KVM_S390_INT_SERVICE:
2035                 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
2036                 inti->ext.ext_params = s390int->parm;
2037                 break;
2038         case KVM_S390_INT_PFAULT_DONE:
2039                 inti->ext.ext_params2 = s390int->parm64;
2040                 break;
2041         case KVM_S390_MCHK:
2042                 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
2043                          s390int->parm64);
2044                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
2045                 inti->mchk.mcic = s390int->parm64;
2046                 break;
2047         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2048                 inti->io.subchannel_id = s390int->parm >> 16;
2049                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
2050                 inti->io.io_int_parm = s390int->parm64 >> 32;
2051                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
2052                 break;
2053         default:
2054                 kfree(inti);
2055                 return -EINVAL;
2056         }
2057         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
2058                                  2);
2059
2060         rc = __inject_vm(kvm, inti);
2061         if (rc)
2062                 kfree(inti);
2063         return rc;
2064 }
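
/*
 * Editorial sketch, not part of the original file: raising a floating
 * service (sclp) interrupt in-kernel.  The function name is hypothetical;
 * parm carries the SCCB address/event bits, as in the switch above.
 */
static int __maybe_unused example_raise_service_int(struct kvm *kvm, u32 sccb)
{
        struct kvm_s390_interrupt s390int = {
                .type = KVM_S390_INT_SERVICE,
                .parm = sccb,
        };

        return kvm_s390_inject_vm(kvm, &s390int);
}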
2065
2066 int kvm_s390_reinject_io_int(struct kvm *kvm,
2067                               struct kvm_s390_interrupt_info *inti)
2068 {
2069         return __inject_vm(kvm, inti);
2070 }
2071
2072 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
2073                        struct kvm_s390_irq *irq)
2074 {
2075         irq->type = s390int->type;
2076         switch (irq->type) {
2077         case KVM_S390_PROGRAM_INT:
2078                 if (s390int->parm & 0xffff0000)
2079                         return -EINVAL;
2080                 irq->u.pgm.code = s390int->parm;
2081                 break;
2082         case KVM_S390_SIGP_SET_PREFIX:
2083                 irq->u.prefix.address = s390int->parm;
2084                 break;
2085         case KVM_S390_SIGP_STOP:
2086                 irq->u.stop.flags = s390int->parm;
2087                 break;
2088         case KVM_S390_INT_EXTERNAL_CALL:
2089                 if (s390int->parm & 0xffff0000)
2090                         return -EINVAL;
2091                 irq->u.extcall.code = s390int->parm;
2092                 break;
2093         case KVM_S390_INT_EMERGENCY:
2094                 if (s390int->parm & 0xffff0000)
2095                         return -EINVAL;
2096                 irq->u.emerg.code = s390int->parm;
2097                 break;
2098         case KVM_S390_MCHK:
2099                 irq->u.mchk.mcic = s390int->parm64;
2100                 break;
2101         case KVM_S390_INT_PFAULT_INIT:
2102                 irq->u.ext.ext_params = s390int->parm;
2103                 irq->u.ext.ext_params2 = s390int->parm64;
2104                 break;
2105         case KVM_S390_RESTART:
2106         case KVM_S390_INT_CLOCK_COMP:
2107         case KVM_S390_INT_CPU_TIMER:
2108                 break;
2109         default:
2110                 return -EINVAL;
2111         }
2112         return 0;
2113 }
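
/*
 * Editorial sketch, not part of the original file: the legacy
 * KVM_S390_INTERRUPT ioctl path converts the old kvm_s390_interrupt
 * layout first and then injects the result, roughly like this (the real
 * caller lives in kvm-s390.c; the function name here is hypothetical).
 */
static int __maybe_unused example_legacy_inject(struct kvm_vcpu *vcpu,
                                                struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_irq irq;

        if (s390int_to_s390irq(s390int, &irq))
                return -EINVAL;
        return kvm_s390_inject_vcpu(vcpu, &irq);
}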
2114
2115 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
2116 {
2117         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2118
2119         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
2120 }
2121
2122 int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
2123 {
2124         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2125
2126         return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
2127 }
2128
2129 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
2130 {
2131         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2132
2133         spin_lock(&li->lock);
2134         li->irq.stop.flags = 0;
2135         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
2136         spin_unlock(&li->lock);
2137 }
2138
2139 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2140 {
2141         int rc;
2142
2143         switch (irq->type) {
2144         case KVM_S390_PROGRAM_INT:
2145                 rc = __inject_prog(vcpu, irq);
2146                 break;
2147         case KVM_S390_SIGP_SET_PREFIX:
2148                 rc = __inject_set_prefix(vcpu, irq);
2149                 break;
2150         case KVM_S390_SIGP_STOP:
2151                 rc = __inject_sigp_stop(vcpu, irq);
2152                 break;
2153         case KVM_S390_RESTART:
2154                 rc = __inject_sigp_restart(vcpu);
2155                 break;
2156         case KVM_S390_INT_CLOCK_COMP:
2157                 rc = __inject_ckc(vcpu);
2158                 break;
2159         case KVM_S390_INT_CPU_TIMER:
2160                 rc = __inject_cpu_timer(vcpu);
2161                 break;
2162         case KVM_S390_INT_EXTERNAL_CALL:
2163                 rc = __inject_extcall(vcpu, irq);
2164                 break;
2165         case KVM_S390_INT_EMERGENCY:
2166                 rc = __inject_sigp_emergency(vcpu, irq);
2167                 break;
2168         case KVM_S390_MCHK:
2169                 rc = __inject_mchk(vcpu, irq);
2170                 break;
2171         case KVM_S390_INT_PFAULT_INIT:
2172                 rc = __inject_pfault_init(vcpu, irq);
2173                 break;
2174         case KVM_S390_INT_VIRTIO:
2175         case KVM_S390_INT_SERVICE:
2176         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2177         default:
2178                 rc = -EINVAL;
2179         }
2180
2181         return rc;
2182 }
2183
2184 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2185 {
2186         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2187         int rc;
2188
2189         spin_lock(&li->lock);
2190         rc = do_inject_vcpu(vcpu, irq);
2191         spin_unlock(&li->lock);
2192         if (!rc)
2193                 kvm_s390_vcpu_wakeup(vcpu);
2194         return rc;
2195 }
2196
2197 static inline void clear_irq_list(struct list_head *_list)
2198 {
2199         struct kvm_s390_interrupt_info *inti, *n;
2200
2201         list_for_each_entry_safe(inti, n, _list, list) {
2202                 list_del(&inti->list);
2203                 kfree(inti);
2204         }
2205 }
2206
2207 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
2208                        struct kvm_s390_irq *irq)
2209 {
2210         irq->type = inti->type;
2211         switch (inti->type) {
2212         case KVM_S390_INT_PFAULT_INIT:
2213         case KVM_S390_INT_PFAULT_DONE:
2214         case KVM_S390_INT_VIRTIO:
2215                 irq->u.ext = inti->ext;
2216                 break;
2217         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2218                 irq->u.io = inti->io;
2219                 break;
2220         }
2221 }
2222
2223 void kvm_s390_clear_float_irqs(struct kvm *kvm)
2224 {
2225         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2226         int i;
2227
2228         mutex_lock(&kvm->lock);
2229         if (!kvm_s390_pv_is_protected(kvm))
2230                 fi->masked_irqs = 0;
2231         mutex_unlock(&kvm->lock);
2232         spin_lock(&fi->lock);
2233         fi->pending_irqs = 0;
2234         memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
2235         memset(&fi->mchk, 0, sizeof(fi->mchk));
2236         for (i = 0; i < FIRQ_LIST_COUNT; i++)
2237                 clear_irq_list(&fi->lists[i]);
2238         for (i = 0; i < FIRQ_MAX_COUNT; i++)
2239                 fi->counters[i] = 0;
2240         spin_unlock(&fi->lock);
2241         kvm_s390_gisa_clear(kvm);
2242 }
2243
2244 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
2245 {
2246         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
2247         struct kvm_s390_interrupt_info *inti;
2248         struct kvm_s390_float_interrupt *fi;
2249         struct kvm_s390_irq *buf;
2250         struct kvm_s390_irq *irq;
2251         int max_irqs;
2252         int ret = 0;
2253         int n = 0;
2254         int i;
2255
2256         if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2257                 return -EINVAL;
2258
2259         /*
2260          * We are already using -ENOMEM to signal
2261          * userspace that it may retry with a bigger buffer,
2262          * so we need to use something else for this case.
2263          */
2264         buf = vzalloc(len);
2265         if (!buf)
2266                 return -ENOBUFS;
2267
2268         max_irqs = len / sizeof(struct kvm_s390_irq);
2269
2270         if (gi->origin && gisa_get_ipm(gi->origin)) {
2271                 for (i = 0; i <= MAX_ISC; i++) {
2272                         if (n == max_irqs) {
2273                                 /* signal userspace to try again */
2274                                 ret = -ENOMEM;
2275                                 goto out_nolock;
2276                         }
2277                         if (gisa_tac_ipm_gisc(gi->origin, i)) {
2278                                 irq = (struct kvm_s390_irq *) &buf[n];
2279                                 irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
2280                                 irq->u.io.io_int_word = isc_to_int_word(i);
2281                                 n++;
2282                         }
2283                 }
2284         }
2285         fi = &kvm->arch.float_int;
2286         spin_lock(&fi->lock);
2287         for (i = 0; i < FIRQ_LIST_COUNT; i++) {
2288                 list_for_each_entry(inti, &fi->lists[i], list) {
2289                         if (n == max_irqs) {
2290                                 /* signal userspace to try again */
2291                                 ret = -ENOMEM;
2292                                 goto out;
2293                         }
2294                         inti_to_irq(inti, &buf[n]);
2295                         n++;
2296                 }
2297         }
2298         if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
2299             test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
2300                 if (n == max_irqs) {
2301                         /* signal userspace to try again */
2302                         ret = -ENOMEM;
2303                         goto out;
2304                 }
2305                 irq = (struct kvm_s390_irq *) &buf[n];
2306                 irq->type = KVM_S390_INT_SERVICE;
2307                 irq->u.ext = fi->srv_signal;
2308                 n++;
2309         }
2310         if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2311                 if (n == max_irqs) {
2312                         /* signal userspace to try again */
2313                         ret = -ENOMEM;
2314                         goto out;
2315                 }
2316                 irq = (struct kvm_s390_irq *) &buf[n];
2317                 irq->type = KVM_S390_MCHK;
2318                 irq->u.mchk = fi->mchk;
2319                 n++;
2320         }
2321
2322 out:
2323         spin_unlock(&fi->lock);
2324 out_nolock:
2325         if (!ret && n > 0) {
2326                 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2327                         ret = -EFAULT;
2328         }
2329         vfree(buf);
2330
2331         return ret < 0 ? ret : n;
2332 }
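
/*
 * Editorial sketch, not part of the original file: the userspace side of
 * the retry protocol above.  -ENOMEM means "buffer too small, try a
 * bigger one"; a non-negative return is the number of irqs copied (see
 * the final return of get_all_floating_irqs()).  Illustration only --
 * flic_fd, buf and bufsize are hypothetical -- and compiled out.
 */
#if 0
        struct kvm_device_attr attr = {
                .group = KVM_DEV_FLIC_GET_ALL_IRQS,
                .attr = bufsize,                /* buffer length in bytes */
                .addr = (__u64)(unsigned long)buf,
        };
        int n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);

        if (n < 0 && errno == ENOMEM) {
                /* grow buf, up to KVM_S390_FLIC_MAX_BUFFER, and retry */
        }
#endif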
2333
2334 static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2335 {
2336         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2337         struct kvm_s390_ais_all ais;
2338
2339         if (attr->attr < sizeof(ais))
2340                 return -EINVAL;
2341
2342         if (!test_kvm_facility(kvm, 72))
2343                 return -EOPNOTSUPP;
2344
2345         mutex_lock(&fi->ais_lock);
2346         ais.simm = fi->simm;
2347         ais.nimm = fi->nimm;
2348         mutex_unlock(&fi->ais_lock);
2349
2350         if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2351                 return -EFAULT;
2352
2353         return 0;
2354 }
2355
2356 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2357 {
2358         int r;
2359
2360         switch (attr->group) {
2361         case KVM_DEV_FLIC_GET_ALL_IRQS:
2362                 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2363                                           attr->attr);
2364                 break;
2365         case KVM_DEV_FLIC_AISM_ALL:
2366                 r = flic_ais_mode_get_all(dev->kvm, attr);
2367                 break;
2368         default:
2369                 r = -EINVAL;
2370         }
2371
2372         return r;
2373 }
2374
2375 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2376                                      u64 addr)
2377 {
2378         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2379         void *target = NULL;
2380         void __user *source;
2381         u64 size;
2382
2383         if (get_user(inti->type, (u64 __user *)addr))
2384                 return -EFAULT;
2385
2386         switch (inti->type) {
2387         case KVM_S390_INT_PFAULT_INIT:
2388         case KVM_S390_INT_PFAULT_DONE:
2389         case KVM_S390_INT_VIRTIO:
2390         case KVM_S390_INT_SERVICE:
2391                 target = (void *) &inti->ext;
2392                 source = &uptr->u.ext;
2393                 size = sizeof(inti->ext);
2394                 break;
2395         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2396                 target = (void *) &inti->io;
2397                 source = &uptr->u.io;
2398                 size = sizeof(inti->io);
2399                 break;
2400         case KVM_S390_MCHK:
2401                 target = (void *) &inti->mchk;
2402                 source = &uptr->u.mchk;
2403                 size = sizeof(inti->mchk);
2404                 break;
2405         default:
2406                 return -EINVAL;
2407         }
2408
2409         if (copy_from_user(target, source, size))
2410                 return -EFAULT;
2411
2412         return 0;
2413 }
2414
2415 static int enqueue_floating_irq(struct kvm_device *dev,
2416                                 struct kvm_device_attr *attr)
2417 {
2418         struct kvm_s390_interrupt_info *inti = NULL;
2419         int r = 0;
2420         int len = attr->attr;
2421
2422         if (len % sizeof(struct kvm_s390_irq) != 0)
2423                 return -EINVAL;
2424         else if (len > KVM_S390_FLIC_MAX_BUFFER)
2425                 return -EINVAL;
2426
2427         while (len >= sizeof(struct kvm_s390_irq)) {
2428                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2429                 if (!inti)
2430                         return -ENOMEM;
2431
2432                 r = copy_irq_from_user(inti, attr->addr);
2433                 if (r) {
2434                         kfree(inti);
2435                         return r;
2436                 }
2437                 r = __inject_vm(dev->kvm, inti);
2438                 if (r) {
2439                         kfree(inti);
2440                         return r;
2441                 }
2442                 len -= sizeof(struct kvm_s390_irq);
2443                 attr->addr += sizeof(struct kvm_s390_irq);
2444         }
2445
2446         return r;
2447 }
2448
2449 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2450 {
2451         if (id >= MAX_S390_IO_ADAPTERS)
2452                 return NULL;
2453         id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
2454         return kvm->arch.adapters[id];
2455 }
2456
2457 static int register_io_adapter(struct kvm_device *dev,
2458                                struct kvm_device_attr *attr)
2459 {
2460         struct s390_io_adapter *adapter;
2461         struct kvm_s390_io_adapter adapter_info;
2462
2463         if (copy_from_user(&adapter_info,
2464                            (void __user *)attr->addr, sizeof(adapter_info)))
2465                 return -EFAULT;
2466
2467         if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
2468                 return -EINVAL;
2469
2470         adapter_info.id = array_index_nospec(adapter_info.id,
2471                                              MAX_S390_IO_ADAPTERS);
2472
2473         if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
2474                 return -EINVAL;
2475
2476         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2477         if (!adapter)
2478                 return -ENOMEM;
2479
2480         adapter->id = adapter_info.id;
2481         adapter->isc = adapter_info.isc;
2482         adapter->maskable = adapter_info.maskable;
2483         adapter->masked = false;
2484         adapter->swap = adapter_info.swap;
2485         adapter->suppressible = (adapter_info.flags) &
2486                                 KVM_S390_ADAPTER_SUPPRESSIBLE;
2487         dev->kvm->arch.adapters[adapter->id] = adapter;
2488
2489         return 0;
2490 }
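
/*
 * Editorial sketch, not part of the original file: how userspace reaches
 * register_io_adapter() through the FLIC device.  Illustration only
 * (flic_fd and the field values are hypothetical) and compiled out.
 */
#if 0
        struct kvm_s390_io_adapter ai = {
                .id = 0,
                .isc = 3,
                .maskable = 1,
        };
        struct kvm_device_attr attr = {
                .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
                .addr = (__u64)(unsigned long)&ai,
        };

        ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
#endif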
2491
2492 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2493 {
2494         int ret;
2495         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2496
2497         if (!adapter || !adapter->maskable)
2498                 return -EINVAL;
2499         ret = adapter->masked;
2500         adapter->masked = masked;
2501         return ret;
2502 }
2503
2504 void kvm_s390_destroy_adapters(struct kvm *kvm)
2505 {
2506         int i;
2507
2508         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
2509                 kfree(kvm->arch.adapters[i]);
2510 }
2511
2512 static int modify_io_adapter(struct kvm_device *dev,
2513                              struct kvm_device_attr *attr)
2514 {
2515         struct kvm_s390_io_adapter_req req;
2516         struct s390_io_adapter *adapter;
2517         int ret;
2518
2519         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2520                 return -EFAULT;
2521
2522         adapter = get_io_adapter(dev->kvm, req.id);
2523         if (!adapter)
2524                 return -EINVAL;
2525         switch (req.type) {
2526         case KVM_S390_IO_ADAPTER_MASK:
2527                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2528                 if (ret > 0)
2529                         ret = 0;
2530                 break;
2531         /*
2532          * The following operations are no longer needed and therefore no-ops.
2533          * The gpa to hva translation is done when an IRQ route is set up. The
2534          * set_irq code uses get_user_pages_remote() to do the actual write.
2535          */
2536         case KVM_S390_IO_ADAPTER_MAP:
2537         case KVM_S390_IO_ADAPTER_UNMAP:
2538                 ret = 0;
2539                 break;
2540         default:
2541                 ret = -EINVAL;
2542         }
2543
2544         return ret;
2545 }
2546
2547 static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2548 {
2550         const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2551         u32 schid;
2552
2553         if (attr->flags)
2554                 return -EINVAL;
2555         if (attr->attr != sizeof(schid))
2556                 return -EINVAL;
2557         if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2558                 return -EFAULT;
2559         if (!schid)
2560                 return -EINVAL;
2561         kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2562         /*
2563          * If userspace is conforming to the architecture, we can have at most
2564          * one pending I/O interrupt per subchannel, so this is effectively a
2565          * clear all.
2566          */
2567         return 0;
2568 }
2569
2570 static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2571 {
2572         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2573         struct kvm_s390_ais_req req;
2574         int ret = 0;
2575
2576         if (!test_kvm_facility(kvm, 72))
2577                 return -EOPNOTSUPP;
2578
2579         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2580                 return -EFAULT;
2581
2582         if (req.isc > MAX_ISC)
2583                 return -EINVAL;
2584
2585         trace_kvm_s390_modify_ais_mode(req.isc,
2586                                        (fi->simm & AIS_MODE_MASK(req.isc)) ?
2587                                        (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2588                                        2 : KVM_S390_AIS_MODE_SINGLE :
2589                                        KVM_S390_AIS_MODE_ALL, req.mode);
2590
2591         mutex_lock(&fi->ais_lock);
2592         switch (req.mode) {
2593         case KVM_S390_AIS_MODE_ALL:
2594                 fi->simm &= ~AIS_MODE_MASK(req.isc);
2595                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2596                 break;
2597         case KVM_S390_AIS_MODE_SINGLE:
2598                 fi->simm |= AIS_MODE_MASK(req.isc);
2599                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2600                 break;
2601         default:
2602                 ret = -EINVAL;
2603         }
2604         mutex_unlock(&fi->ais_lock);
2605
2606         return ret;
2607 }
2608
2609 static int kvm_s390_inject_airq(struct kvm *kvm,
2610                                 struct s390_io_adapter *adapter)
2611 {
2612         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2613         struct kvm_s390_interrupt s390int = {
2614                 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2615                 .parm = 0,
2616                 .parm64 = isc_to_int_word(adapter->isc),
2617         };
2618         int ret = 0;
2619
2620         if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
2621                 return kvm_s390_inject_vm(kvm, &s390int);
2622
2623         mutex_lock(&fi->ais_lock);
2624         if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2625                 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2626                 goto out;
2627         }
2628
2629         ret = kvm_s390_inject_vm(kvm, &s390int);
2630         if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2631                 fi->nimm |= AIS_MODE_MASK(adapter->isc);
2632                 trace_kvm_s390_modify_ais_mode(adapter->isc,
2633                                                KVM_S390_AIS_MODE_SINGLE, 2);
2634         }
2635 out:
2636         mutex_unlock(&fi->ais_lock);
2637         return ret;
2638 }
2639
2640 static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2641 {
2642         unsigned int id = attr->attr;
2643         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2644
2645         if (!adapter)
2646                 return -EINVAL;
2647
2648         return kvm_s390_inject_airq(kvm, adapter);
2649 }
2650
2651 static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2652 {
2653         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2654         struct kvm_s390_ais_all ais;
2655
2656         if (!test_kvm_facility(kvm, 72))
2657                 return -EOPNOTSUPP;
2658
2659         if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2660                 return -EFAULT;
2661
2662         mutex_lock(&fi->ais_lock);
2663         fi->simm = ais.simm;
2664         fi->nimm = ais.nimm;
2665         mutex_unlock(&fi->ais_lock);
2666
2667         return 0;
2668 }
2669
2670 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2671 {
2672         int r = 0;
2673         unsigned int i;
2674         struct kvm_vcpu *vcpu;
2675
2676         switch (attr->group) {
2677         case KVM_DEV_FLIC_ENQUEUE:
2678                 r = enqueue_floating_irq(dev, attr);
2679                 break;
2680         case KVM_DEV_FLIC_CLEAR_IRQS:
2681                 kvm_s390_clear_float_irqs(dev->kvm);
2682                 break;
2683         case KVM_DEV_FLIC_APF_ENABLE:
2684                 dev->kvm->arch.gmap->pfault_enabled = 1;
2685                 break;
2686         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2687                 dev->kvm->arch.gmap->pfault_enabled = 0;
2688                 /*
2689                  * Make sure no async faults are in flight while we
2690                  * clear the queues, so we don't need to worry about
2691                  * late-arriving workers.
2692                  */
2693                 synchronize_srcu(&dev->kvm->srcu);
2694                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2695                         kvm_clear_async_pf_completion_queue(vcpu);
2696                 break;
2697         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2698                 r = register_io_adapter(dev, attr);
2699                 break;
2700         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2701                 r = modify_io_adapter(dev, attr);
2702                 break;
2703         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2704                 r = clear_io_irq(dev->kvm, attr);
2705                 break;
2706         case KVM_DEV_FLIC_AISM:
2707                 r = modify_ais_mode(dev->kvm, attr);
2708                 break;
2709         case KVM_DEV_FLIC_AIRQ_INJECT:
2710                 r = flic_inject_airq(dev->kvm, attr);
2711                 break;
2712         case KVM_DEV_FLIC_AISM_ALL:
2713                 r = flic_ais_mode_set_all(dev->kvm, attr);
2714                 break;
2715         default:
2716                 r = -EINVAL;
2717         }
2718
2719         return r;
2720 }
2721
2722 static int flic_has_attr(struct kvm_device *dev,
2723                              struct kvm_device_attr *attr)
2724 {
2725         switch (attr->group) {
2726         case KVM_DEV_FLIC_GET_ALL_IRQS:
2727         case KVM_DEV_FLIC_ENQUEUE:
2728         case KVM_DEV_FLIC_CLEAR_IRQS:
2729         case KVM_DEV_FLIC_APF_ENABLE:
2730         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2731         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2732         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2733         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2734         case KVM_DEV_FLIC_AISM:
2735         case KVM_DEV_FLIC_AIRQ_INJECT:
2736         case KVM_DEV_FLIC_AISM_ALL:
2737                 return 0;
2738         }
2739         return -ENXIO;
2740 }
2741
2742 static int flic_create(struct kvm_device *dev, u32 type)
2743 {
2744         if (!dev)
2745                 return -EINVAL;
2746         if (dev->kvm->arch.flic)
2747                 return -EINVAL;
2748         dev->kvm->arch.flic = dev;
2749         return 0;
2750 }
2751
2752 static void flic_destroy(struct kvm_device *dev)
2753 {
2754         dev->kvm->arch.flic = NULL;
2755         kfree(dev);
2756 }
2757
2758 /* s390 floating irq controller (flic) */
2759 struct kvm_device_ops kvm_flic_ops = {
2760         .name = "kvm-flic",
2761         .get_attr = flic_get_attr,
2762         .set_attr = flic_set_attr,
2763         .has_attr = flic_has_attr,
2764         .create = flic_create,
2765         .destroy = flic_destroy,
2766 };
2767
2768 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2769 {
2770         unsigned long bit;
2771
2772         bit = bit_nr + (addr % PAGE_SIZE) * 8;
2773
2774         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2775 }
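
/*
 * Editorial sketch, not part of the original file: a worked example for
 * get_ind_bit().  Without swap the indicator bits use LSB-0 numbering
 * within the page; with swap they use MSB-0 numbering, hence the XOR
 * with BITS_PER_LONG - 1.
 */
static void __maybe_unused example_get_ind_bit(void)
{
        /* byte 2 of the page, bit 3: LSB-0 bit number 3 + 2 * 8 = 19 */
        WARN_ON(get_ind_bit(0x1002, 3, false) != 19);
        /* the same bit in MSB-0 numbering on a 64-bit kernel: 19 ^ 63 = 44 */
        WARN_ON(get_ind_bit(0x1002, 3, true) != 44);
}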
2776
2777 static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
2778 {
2779         struct page *page = NULL;
2780
2781         mmap_read_lock(kvm->mm);
2782         get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
2783                               &page, NULL, NULL);
2784         mmap_read_unlock(kvm->mm);
2785         return page;
2786 }
2787
static int adapter_indicators_set(struct kvm *kvm,
                                  struct s390_io_adapter *adapter,
                                  struct kvm_s390_adapter_int *adapter_int)
{
        unsigned long bit;
        int summary_set, idx;
        struct page *ind_page, *summary_page;
        void *map;

        ind_page = get_map_page(kvm, adapter_int->ind_addr);
        if (!ind_page)
                return -1;
        summary_page = get_map_page(kvm, adapter_int->summary_addr);
        if (!summary_page) {
                put_page(ind_page);
                return -1;
        }

        idx = srcu_read_lock(&kvm->srcu);
        map = page_address(ind_page);
        bit = get_ind_bit(adapter_int->ind_addr,
                          adapter_int->ind_offset, adapter->swap);
        set_bit(bit, map);
        mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
        set_page_dirty_lock(ind_page);
        map = page_address(summary_page);
        bit = get_ind_bit(adapter_int->summary_addr,
                          adapter_int->summary_offset, adapter->swap);
        summary_set = test_and_set_bit(bit, map);
        mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
        set_page_dirty_lock(summary_page);
        srcu_read_unlock(&kvm->srcu, idx);

        put_page(ind_page);
        put_page(summary_page);
        return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level,
                           bool line_status)
{
        int ret;
        struct s390_io_adapter *adapter;

        /* We're only interested in the 0->1 transition. */
        if (!level)
                return 0;
        adapter = get_io_adapter(kvm, e->adapter.adapter_id);
        if (!adapter)
                return -1;
        ret = adapter_indicators_set(kvm, adapter, &e->adapter);
        if ((ret > 0) && !adapter->masked) {
                ret = kvm_s390_inject_airq(kvm, adapter);
                if (ret == 0)
                        ret = 1;
        }
        return ret;
}

/*
 * Inject the machine check into the guest.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
                                     struct mcck_volatile_info *mcck_info)
{
        struct kvm_s390_interrupt_info inti;
        struct kvm_s390_irq irq;
        struct kvm_s390_mchk_info *mchk;
        union mci mci;
        __u64 cr14 = 0;         /* upper bits are not used */
        int rc;

        mci.val = mcck_info->mcic;
        if (mci.sr)
                cr14 |= CR14_RECOVERY_SUBMASK;
        if (mci.dg)
                cr14 |= CR14_DEGRADATION_SUBMASK;
        if (mci.w)
                cr14 |= CR14_WARNING_SUBMASK;

        mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
        mchk->cr14 = cr14;
        mchk->mcic = mcck_info->mcic;
        mchk->ext_damage_code = mcck_info->ext_damage_code;
        mchk->failing_storage_address = mcck_info->failing_storage_address;
        if (mci.ck) {
                /* Inject the floating machine check */
                inti.type = KVM_S390_MCHK;
                rc = __inject_vm(vcpu->kvm, &inti);
        } else {
                /* Inject the machine check to the specified vcpu */
                irq.type = KVM_S390_MCHK;
                rc = kvm_s390_inject_vcpu(vcpu, &irq);
        }
        WARN_ON_ONCE(rc);
}

int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue)
{
        u64 uaddr;

        switch (ue->type) {
        /* we store the userspace addresses instead of the guest addresses */
        case KVM_IRQ_ROUTING_S390_ADAPTER:
                e->set = set_adapter_int;
                uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
                if (uaddr == -EFAULT)
                        return -EFAULT;
                e->adapter.summary_addr = uaddr;
                uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
                if (uaddr == -EFAULT)
                        return -EFAULT;
                e->adapter.ind_addr = uaddr;
                e->adapter.summary_offset = ue->u.adapter.summary_offset;
                e->adapter.ind_offset = ue->u.adapter.ind_offset;
                e->adapter.adapter_id = ue->u.adapter.adapter_id;
                return 0;
        default:
                return -EINVAL;
        }
}

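/*
 * For illustration, a userspace VMM could install such an adapter
 * route along these lines (sketch only; gsi and the adapter values
 * are made-up examples, error handling omitted):
 *
 *      struct kvm_irq_routing *r;
 *
 *      r = calloc(1, sizeof(*r) + sizeof(r->entries[0]));
 *      r->nr = 1;
 *      r->entries[0].gsi = gsi;
 *      r->entries[0].type = KVM_IRQ_ROUTING_S390_ADAPTER;
 *      r->entries[0].u.adapter.summary_addr = summary_gaddr;
 *      r->entries[0].u.adapter.ind_addr = ind_gaddr;
 *      r->entries[0].u.adapter.summary_offset = summary_bit;
 *      r->entries[0].u.adapter.ind_offset = ind_bit;
 *      r->entries[0].u.adapter.adapter_id = adapter_id;
 *      ioctl(vm_fd, KVM_SET_GSI_ROUTING, r);
 *
 * The adapter addresses are guest addresses at this point; as noted
 * in the comment above, they are translated and stored as userspace
 * addresses.
 */
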
/* Direct MSI injection is not supported on s390. */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
                int irq_source_id, int level, bool line_status)
{
        return -EINVAL;
}

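/*
 * Restore the local interrupt state of a vcpu from userspace:
 * @irqstate points to an array of struct kvm_s390_irq of @len bytes
 * in total. Each entry is injected in order via do_inject_vcpu(); the
 * call fails with -EBUSY when the vcpu already has local interrupts
 * pending.
 */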
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq *buf;
        int r = 0;
        int n;

        buf = vmalloc(len);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user((void *) buf, irqstate, len)) {
                r = -EFAULT;
                goto out_free;
        }

        /*
         * Don't allow setting the interrupt state
         * when there are already interrupts pending
         */
        spin_lock(&li->lock);
        if (li->pending_irqs) {
                r = -EBUSY;
                goto out_unlock;
        }

        for (n = 0; n < len / sizeof(*buf); n++) {
                r = do_inject_vcpu(vcpu, &buf[n]);
                if (r)
                        break;
        }

out_unlock:
        spin_unlock(&li->lock);
out_free:
        vfree(buf);

        return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
                            struct kvm_s390_irq *irq,
                            unsigned long irq_type)
{
        switch (irq_type) {
        case IRQ_PEND_MCHK_EX:
        case IRQ_PEND_MCHK_REP:
                irq->type = KVM_S390_MCHK;
                irq->u.mchk = li->irq.mchk;
                break;
        case IRQ_PEND_PROG:
                irq->type = KVM_S390_PROGRAM_INT;
                irq->u.pgm = li->irq.pgm;
                break;
        case IRQ_PEND_PFAULT_INIT:
                irq->type = KVM_S390_INT_PFAULT_INIT;
                irq->u.ext = li->irq.ext;
                break;
        case IRQ_PEND_EXT_EXTERNAL:
                irq->type = KVM_S390_INT_EXTERNAL_CALL;
                irq->u.extcall = li->irq.extcall;
                break;
        case IRQ_PEND_EXT_CLOCK_COMP:
                irq->type = KVM_S390_INT_CLOCK_COMP;
                break;
        case IRQ_PEND_EXT_CPU_TIMER:
                irq->type = KVM_S390_INT_CPU_TIMER;
                break;
        case IRQ_PEND_SIGP_STOP:
                irq->type = KVM_S390_SIGP_STOP;
                irq->u.stop = li->irq.stop;
                break;
        case IRQ_PEND_RESTART:
                irq->type = KVM_S390_RESTART;
                break;
        case IRQ_PEND_SET_PREFIX:
                irq->type = KVM_S390_SIGP_SET_PREFIX;
                irq->u.prefix = li->irq.prefix;
                break;
        }
}

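/*
 * Store the local interrupt state of a vcpu into the userspace buffer
 * @buf of @len bytes, one struct kvm_s390_irq per pending interrupt;
 * pending emergency signals and a pending external call are stored as
 * individual entries. Returns the number of bytes written, -ENOBUFS
 * when the buffer is too small, or -EFAULT on a copy failure.
 */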
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
        int scn;
        DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        unsigned long pending_irqs;
        struct kvm_s390_irq irq;
        unsigned long irq_type;
        int cpuaddr;
        int n = 0;

        spin_lock(&li->lock);
        pending_irqs = li->pending_irqs;
        memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
               sizeof(sigp_emerg_pending));
        spin_unlock(&li->lock);

        for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
                memset(&irq, 0, sizeof(irq));
                if (irq_type == IRQ_PEND_EXT_EMERGENCY)
                        continue;
                if (n + sizeof(irq) > len)
                        return -ENOBUFS;
                store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
                if (copy_to_user(&buf[n], &irq, sizeof(irq)))
                        return -EFAULT;
                n += sizeof(irq);
        }

        if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
                for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
                        memset(&irq, 0, sizeof(irq));
                        if (n + sizeof(irq) > len)
                                return -ENOBUFS;
                        irq.type = KVM_S390_INT_EMERGENCY;
                        irq.u.emerg.code = cpuaddr;
                        if (copy_to_user(&buf[n], &irq, sizeof(irq)))
                                return -EFAULT;
                        n += sizeof(irq);
                }
        }

        if (sca_ext_call_pending(vcpu, &scn)) {
                if (n + sizeof(irq) > len)
                        return -ENOBUFS;
                memset(&irq, 0, sizeof(irq));
                irq.type = KVM_S390_INT_EXTERNAL_CALL;
                irq.u.extcall.code = scn;
                if (copy_to_user(&buf[n], &irq, sizeof(irq)))
                        return -EFAULT;
                n += sizeof(irq);
        }

        return n;
}

static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
        int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
        struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
        struct kvm_vcpu *vcpu;
        u8 vcpu_isc_mask;

        for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (psw_ioint_disabled(vcpu))
                        continue;
                /* the effective I/O interruption subclass mask of this vcpu */
                vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
                if (deliverable_mask & vcpu_isc_mask) {
                        /* already kicked but not yet running, don't kick again */
                        if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
                                return;
                        kvm_s390_vcpu_wakeup(vcpu);
                        return;
                }
        }
}

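/*
 * Timer callback for a GISA that went onto the GIB alert list: kick a
 * single idle vcpu that can take the pending ISCs, and re-arm the
 * timer until no deliverable interruption is left in the IPM, at which
 * point the IAM is restored by gisa_get_ipm_or_restore_iam().
 */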
static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
{
        struct kvm_s390_gisa_interrupt *gi =
                container_of(timer, struct kvm_s390_gisa_interrupt, timer);
        struct kvm *kvm =
                container_of(gi->origin, struct sie_page2, gisa)->kvm;
        u8 pending_mask;

        pending_mask = gisa_get_ipm_or_restore_iam(gi);
        if (pending_mask) {
                __airqs_kick_single_vcpu(kvm, pending_mask);
                hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
                return HRTIMER_RESTART;
        }

        return HRTIMER_NORESTART;
}

#define NULL_GISA_ADDR 0x00000000UL /* empty alert list, GAL interruptions enabled */
#define NONE_GISA_ADDR 0x00000001UL /* list cut off, further GAL interruptions suppressed */
#define GISA_ADDR_MASK 0xfffff000UL /* GISA origin address bits of a list entry */

static void process_gib_alert_list(void)
{
        struct kvm_s390_gisa_interrupt *gi;
        struct kvm_s390_gisa *gisa;
        struct kvm *kvm;
        u32 final, origin = 0UL;

        do {
                /*
                 * If the NONE_GISA_ADDR is still stored in the alert list
                 * origin, we will leave the outer loop. No further GISA has
                 * been added to the alert list by millicode while processing
                 * the current alert list.
                 */
                final = (origin & NONE_GISA_ADDR);
                /*
                 * Cut off the alert list and store the NONE_GISA_ADDR in the
                 * alert list origin to avoid further GAL interruptions.
                 * A new alert list can be built up by millicode in parallel
                 * for guests not in the just cut-off alert list. On the
                 * final iteration, store the NULL_GISA_ADDR instead. This
                 * will re-enable GAL interruptions on the host again.
                 */
                origin = xchg(&gib->alert_list_origin,
                              (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
                /*
                 * Loop through the just cut-off alert list and start the
                 * gisa timers to kick idle vcpus to consume the pending
                 * interruptions asap.
                 */
                while (origin & GISA_ADDR_MASK) {
                        gisa = (struct kvm_s390_gisa *)(u64)origin;
                        origin = gisa->next_alert;
                        gisa->next_alert = (u32)(u64)gisa;
                        kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
                        gi = &kvm->arch.gisa_int;
                        if (hrtimer_active(&gi->timer))
                                hrtimer_cancel(&gi->timer);
                        hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
                }
        } while (!final);
}

void kvm_s390_gisa_clear(struct kvm *kvm)
{
        struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

        if (!gi->origin)
                return;
        gisa_clear_ipm(gi->origin);
        VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
}

void kvm_s390_gisa_init(struct kvm *kvm)
{
        struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

        if (!css_general_characteristics.aiv)
                return;
        gi->origin = &kvm->arch.sie_page2->gisa;
        gi->alert.mask = 0;
        spin_lock_init(&gi->alert.ref_lock);
        gi->expires = 50 * 1000; /* 50 usec */
        hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        gi->timer.function = gisa_vcpu_kicker;
        memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
        /* a self-pointing next_alert marks a GISA that is not on the alert list */
        gi->origin->next_alert = (u32)(u64)gi->origin;
        VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
}

void kvm_s390_gisa_destroy(struct kvm *kvm)
{
        struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

        if (!gi->origin)
                return;
        if (gi->alert.mask)
                KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
                          kvm, gi->alert.mask);
        /* wait until the GISA has been taken off the alert list */
        while (gisa_in_alert_list(gi->origin))
                cpu_relax();
        hrtimer_cancel(&gi->timer);
        gi->origin = NULL;
}

/**
 * kvm_s390_gisc_register - register a guest ISC
 *
 * @kvm:  the kernel vm to work with
 * @gisc: the guest interruption sub class to register
 *
 * The function adds the given guest ISC to the vm specific alert mask.
 * The effective IAM mask in the GISA is updated as well
 * in case the GISA is not part of the GIB alert list.
 * It will be updated at the latest when the IAM gets restored
 * by gisa_get_ipm_or_restore_iam().
 *
 * Returns: the nonspecific ISC (NISC) the gib alert mechanism
 *          has registered with the channel subsystem.
 *          -ENODEV in case the vm uses no GISA
 *          -ERANGE in case the guest ISC is invalid
 */
int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
{
        struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

        if (!gi->origin)
                return -ENODEV;
        if (gisc > MAX_ISC)
                return -ERANGE;

        spin_lock(&gi->alert.ref_lock);
        gi->alert.ref_count[gisc]++;
        if (gi->alert.ref_count[gisc] == 1) {
                /* the alert mask is ordered MSB-first, like the GISA IPM */
                gi->alert.mask |= 0x80 >> gisc;
                gisa_set_iam(gi->origin, gi->alert.mask);
        }
        spin_unlock(&gi->alert.ref_lock);

        return gib->nisc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);

/**
 * kvm_s390_gisc_unregister - unregister a guest ISC
 *
 * @kvm:  the kernel vm to work with
 * @gisc: the guest interruption sub class to unregister
 *
 * The function removes the given guest ISC from the vm specific alert
 * mask once its reference count drops to zero.
 * The effective IAM mask in the GISA is updated as well
 * in case the GISA is not part of the GIB alert list.
 * It will be updated at the latest when the IAM gets restored
 * by gisa_get_ipm_or_restore_iam().
 *
 * Returns: 0 in case of success
 *          -ENODEV in case the vm uses no GISA
 *          -ERANGE in case the guest ISC is invalid
 *          -EINVAL in case the guest ISC is not registered
 */
int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
{
        struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
        int rc = 0;

        if (!gi->origin)
                return -ENODEV;
        if (gisc > MAX_ISC)
                return -ERANGE;

        spin_lock(&gi->alert.ref_lock);
        if (gi->alert.ref_count[gisc] == 0) {
                rc = -EINVAL;
                goto out;
        }
        gi->alert.ref_count[gisc]--;
        if (gi->alert.ref_count[gisc] == 0) {
                gi->alert.mask &= ~(0x80 >> gisc);
                gisa_set_iam(gi->origin, gi->alert.mask);
        }
out:
        spin_unlock(&gi->alert.ref_lock);

        return rc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
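
/*
 * A rough usage sketch for the two functions above (illustrative only;
 * the caller and error handling are hypothetical):
 *
 *      int nisc = kvm_s390_gisc_register(kvm, gisc);
 *
 *      if (nisc < 0)
 *              return nisc;
 *      ...set up the host adapter interruption on the returned NISC...
 *      kvm_s390_gisc_unregister(kvm, gisc);
 */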

/* the handler for GIB alert (GAL) interruptions */
static void gib_alert_irq_handler(struct airq_struct *airq, bool floating)
{
        inc_irq_stat(IRQIO_GAL);
        process_gib_alert_list();
}

static struct airq_struct gib_alert_irq = {
        .handler = gib_alert_irq_handler,
        .lsi_ptr = &gib_alert_irq.lsi_mask,
};

void kvm_s390_gib_destroy(void)
{
        if (!gib)
                return;
        /* dissociate the GIB from the AIV facility */
        chsc_sgib(0);
        unregister_adapter_interrupt(&gib_alert_irq);
        free_page((unsigned long)gib);
        gib = NULL;
}

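/*
 * Allocate and set up the global GIB, register the GIB alert
 * interruption handler for the nonspecific ISC @nisc and associate
 * the GIB with the AIV facility. Intended to be called once while the
 * kvm module initializes; without the AIV facility nothing is set up
 * and 0 is returned.
 */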
int kvm_s390_gib_init(u8 nisc)
{
        int rc = 0;

        if (!css_general_characteristics.aiv) {
                KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
                goto out;
        }

        gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!gib) {
                rc = -ENOMEM;
                goto out;
        }

        gib_alert_irq.isc = nisc;
        if (register_adapter_interrupt(&gib_alert_irq)) {
                pr_err("Registering the GIB alert interruption handler failed\n");
                rc = -EIO;
                goto out_free_gib;
        }

        gib->nisc = nisc;
        if (chsc_sgib((u32)(u64)gib)) {
                pr_err("Associating the GIB with the AIV facility failed\n");
                /* out_unreg_gal falls through and frees the gib page */
                rc = -EIO;
                goto out_unreg_gal;
        }

        KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
        goto out;

out_unreg_gal:
        unregister_adapter_interrupt(&gib_alert_irq);
out_free_gib:
        free_page((unsigned long)gib);
        gib = NULL;
out:
        return rc;
}