/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
        int c, scn;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
                return 0;

        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (src_id)
                *src_id = scn;

        return c;
}

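/*
 * Inject an external call via the SIGP interpretation facility: atomically
 * set the source CPU number and the call-pending bit in this VCPU's SCA
 * entry with cmpxchg(), failing with -EBUSY if another external call is
 * already pending there.
 */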
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
        int expect, rc;

        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (rc != expect) {
                /* another external call is pending */
                return -EBUSY;
        }
        atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
        return 0;
}

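/*
 * Clear a pending external call from this VCPU's SCA entry and drop the
 * ECALL_PEND cpuflag. The cmpxchg() must succeed here, hence the WARN_ON.
 */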
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc, expect;

        atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
        WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        return psw_extint_disabled(vcpu) &&
               psw_ioint_disabled(vcpu) &&
               psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
                return 0;
        return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        return !psw_extint_disabled(vcpu) &&
               (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
        if (!cpu_timer_interrupts_enabled(vcpu))
                return 0;
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
        return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
                (irq_type <= IRQ_PEND_IO_ISC_7));
}

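/*
 * Convert an interruption subclass number (0..MAX_ISC) to the single-bit
 * mask format used for the I/O interruption subclass mask in CR6 and for
 * the isc_mask arguments in this file.
 */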
static uint64_t isc_to_isc_bits(int isc)
{
        return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
        return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.float_int.pending_irqs |
               vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
                                   unsigned long active_mask)
{
        int i;

        for (i = 0; i <= MAX_ISC; i++)
                if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
                        active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

        return active_mask;
}

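/*
 * Filter the union of pending local and floating interrupts down to those
 * that are currently deliverable, based on the interruption mask bits in
 * the guest PSW and the subclass mask bits in CR0, CR6 and CR14. STOP
 * interrupts are never delivered directly and are always filtered out.
 */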
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask;

        active_mask = pending_irqs(vcpu);
        if (!active_mask)
                return 0;

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (psw_ioint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_IO_MASK;
        else
                active_mask = disable_iscs(vcpu, active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;
        if (!(vcpu->arch.sie_block->gcr[14] &
              vcpu->kvm->arch.float_int.mchk.cr14))
                __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                    &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
                return;
        else if (psw_ioint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_IO_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_io(vcpu);
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

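/*
 * The __deliver_*() helpers below present a single pending interrupt to
 * the guest, typically by storing the interruption parameters into the
 * guest lowcore, saving the current PSW as the old PSW and loading the
 * new PSW for that interruption class, and clearing the corresponding
 * pending bit.
 */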
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
                   ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk = {};
        unsigned long adtl_status_addr;
        int deliver = 0;
        int rc = 0;

        spin_lock(&fi->lock);
        spin_lock(&li->lock);
        if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
            test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
                /*
                 * If there was an exigent machine check pending, then any
                 * repressible machine checks that might have been pending
                 * are indicated along with it, so always clear bits for
                 * repressible and exigent interrupts
                 */
                mchk = li->irq.mchk;
                clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
                clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
                memset(&li->irq.mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        /*
         * We indicate floating repressible conditions along with
         * other pending conditions. Channel Report Pending and Channel
         * Subsystem damage are the only two and are indicated by
         * bits in mcic and masked in cr14.
         */
        if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
                mchk.mcic |= fi->mchk.mcic;
                mchk.cr14 |= fi->mchk.cr14;
                memset(&fi->mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        spin_unlock(&li->lock);
        spin_unlock(&fi->lock);

        if (deliver) {
                VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
                           mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_MCHK,
                                                 mchk.cr14, mchk.mcic);

                rc  = kvm_s390_vcpu_store_status(vcpu,
                                                 KVM_S390_STORE_STATUS_PREFIXED);
                rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
                                    &adtl_status_addr,
                                    sizeof(unsigned long));
                rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
                                                      adtl_status_addr);
                rc |= put_guest_lc(vcpu, mchk.mcic,
                                   (u64 __user *) __LC_MCCK_CODE);
                rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
                                   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
                rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                                     &mchk.fixed_logout,
                                     sizeof(mchk.fixed_logout));
                rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw,
                                     sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw,
                                    sizeof(psw_t));
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

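/*
 * Deliver a program interruption: depending on the exception type, store
 * the matching auxiliary fields (translation exception code, access IDs,
 * monitor data, ...) into the lowcore, rewind the PSW for nullifying
 * conditions, and store the ILC and interruption code before the PSW swap.
 */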
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilen;

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
        VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
                   pgm_info.code, ilen);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                /* fall through */
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
                kvm_s390_rewind_psw(vcpu, ilen);

        /* bit 1+2 of the target are the ilc, so we can directly use ilen */
        rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                                 (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;
        int rc = 0;

        spin_lock(&fi->lock);
        if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        memset(&fi->srv_signal, 0, sizeof(ext));
        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
                   ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);

        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_PFAULT] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
                clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_INT_PFAULT_DONE, 0,
                                                 inti->ext.ext_params2);
                VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
                           inti->ext.ext_params2);

                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, PFAULT_DONE,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4,
                           "deliver: virtio parm: 0x%x,parm64: 0x%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                inti->ext.ext_params,
                                inti->ext.ext_params2);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
                clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                                (u32 *)__LC_EXT_PARAMS);
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     unsigned long irq_type)
{
        struct list_head *isc_list;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti = NULL;
        int rc = 0;

        fi = &vcpu->kvm->arch.float_int;

        spin_lock(&fi->lock);
        isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
        inti = list_first_entry_or_null(isc_list,
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
                vcpu->stat.deliver_io_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                ((__u32)inti->io.subchannel_id << 16) |
                                inti->io.subchannel_nr,
                                ((__u64)inti->io.io_int_parm << 32) |
                                inti->io.io_int_word);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
        }
        if (list_empty(isc_list))
                clear_bit(irq_type, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
                                (u16 *)__LC_SUBCHANNEL_ID);
                rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
                                (u16 *)__LC_SUBCHANNEL_NR);
                rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
                                (u32 *)__LC_IO_INT_PARM);
                rc |= put_guest_lc(vcpu, inti->io.io_int_word,
                                (u32 *)__LC_IO_INT_WORD);
                rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                kfree(inti);
        }

        return rc ? -EFAULT : 0;
}

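/*
 * Delivery functions indexed by the IRQ_PEND_* bit number. I/O interrupts
 * are not in this table; they are dispatched separately via __deliver_io().
 */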
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
        [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
        [IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
        [IRQ_PEND_PROG]           = __deliver_prog,
        [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
        [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
        [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
        [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
        [IRQ_PEND_RESTART]        = __deliver_restart,
        [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
        [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
        [IRQ_PEND_EXT_SERVICE]    = __deliver_service,
        [IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
        [IRQ_PEND_VIRTIO]         = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        if (!sclp.has_sigpif)
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        if (deliverable_irqs(vcpu))
                return 1;

        if (kvm_cpu_has_pending_timer(vcpu))
                return 1;

        /* external call pending and deliverable */
        if (kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                return 1;

        if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

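/*
 * Calculate the sleep time in ns until the next enabled timer interrupt:
 * the minimum of the time until the clock comparator and the remaining
 * CPU timer, or 0 if one of them has already expired.
 */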
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
        u64 now, cputm, sltime = 0;

        if (ckc_interrupts_enabled(vcpu)) {
                now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
                sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
                /* already expired or overflow? */
                if (!sltime || vcpu->arch.sie_block->ckc <= now)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
                        /* already expired? */
                        if (cputm >> 63)
                                return 0;
                        return min(sltime, tod_to_ns(cputm));
                }
        } else if (cpu_timer_interrupts_enabled(vcpu)) {
                sltime = kvm_s390_get_cpu_timer(vcpu);
                /* already expired? */
                if (sltime >> 63)
                        return 0;
        }
        return sltime;
}

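/*
 * Handle a guest that entered enabled wait: reject a disabled wait, set up
 * a wakeup timer if timer interrupts are enabled, then block the VCPU
 * until it becomes runnable again.
 */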
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (!ckc_interrupts_enabled(vcpu) &&
            !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        sltime = __calculate_sltime(vcpu);
        if (!sltime)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        if (swait_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                swake_up(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        sltime = __calculate_sltime(vcpu);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        sca_clear_ext_call(vcpu);
}

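/*
 * Deliver all currently deliverable interrupts to the guest, one at a
 * time in priority order, after re-validating the clock comparator and
 * CPU timer conditions. Interception requests are then set for anything
 * that remains pending but cannot be delivered.
 */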
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        deliver_irq_t func;
        int rc = 0;
        unsigned long irq_type;
        unsigned long irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (ckc_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        /* pending cpu timer conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        if (cpu_timer_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
                if (is_ioirq(irq_type)) {
                        rc = __deliver_io(vcpu, irq_type);
                } else {
                        func = deliver_irq_funcs[irq_type];
                        if (!func) {
                                WARN_ON_ONCE(func == NULL);
                                clear_bit(irq_type, &li->pending_irqs);
                                continue;
                        }
                        rc = func(vcpu);
                }
        }

        set_intercept_indicators(vcpu);

        return rc;
}

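/*
 * Queue a program interruption on the local interrupt structure. PER
 * events and other program conditions are merged carefully so that an
 * already pending irq of the other kind is not overwritten.
 */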
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   irq->u.pgm.code, 0);

        if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
                /* auto detection if no valid ILC was given */
                irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
                irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
                irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
        }

        if (irq->u.pgm.code == PGM_PER) {
                li->irq.pgm.code |= PGM_PER;
                li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify PER related information */
                li->irq.pgm.per_address = irq->u.pgm.per_address;
                li->irq.pgm.per_code = irq->u.pgm.per_code;
                li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
                li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
        } else if (!(irq->u.pgm.code & PGM_PER)) {
                li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
                                   irq->u.pgm.code;
                li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify non-PER information */
                li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
                li->irq.pgm.mon_code = irq->u.pgm.mon_code;
                li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
                li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
                li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
                li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
        } else {
                li->irq.pgm = irq->u.pgm;
        }
        set_bit(IRQ_PEND_PROG, &li->pending_irqs);
        return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
                   irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
        uint16_t src_id = irq->u.extcall.code;

        VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
                   src_id);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
                                   src_id, 0);

        /* sending vcpu invalid */
        if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
                return -EINVAL;

        if (sclp.has_sigpif)
                return sca_inject_ext_call(vcpu, src_id);

        if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                return -EBUSY;
        *extcall = irq->u.extcall;
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
                   irq->u.prefix.address);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
                                   irq->u.prefix.address, 0);

        if (!is_vcpu_stopped(vcpu))
                return -EBUSY;

        *prefix = irq->u.prefix;
        set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_stop_info *stop = &li->irq.stop;
        int rc = 0;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

        if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
                return -EINVAL;

        if (is_vcpu_stopped(vcpu)) {
                if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
                        rc = kvm_s390_store_status_unloaded(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                return rc;
        }

        if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
                return -EBUSY;
        stop->flags = irq->u.stop.flags;
        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
        return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
                   irq->u.emerg.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   irq->u.emerg.code, 0);

        /* sending vcpu invalid */
        if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
                return -EINVAL;

        set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
        set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

        VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
                   irq->u.mchk.mcic);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
                                   irq->u.mchk.mcic);

        /*
         * Because repressible machine checks can be indicated along with
         * exigent machine checks (PoP, Chapter 11, Interruption action)
         * we need to combine cr14, mcic and external damage code.
         * Failing storage address and the logout area should not be or'ed
         * together; we just indicate the last occurrence of the
         * corresponding machine check.
         */
        mchk->cr14 |= irq->u.mchk.cr14;
        mchk->mcic |= irq->u.mchk.mcic;
        mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
        mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
        memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
               sizeof(mchk->fixed_logout));
        if (mchk->mcic & MCHK_EX_MASK)
                set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        else if (mchk->mcic & MCHK_REP_MASK)
                set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
        return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                   0, 0);

        set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                   0, 0);

        set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

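/*
 * Dequeue the first I/O interrupt on the given ISC list, optionally
 * restricted to a specific subchannel id/nr (schid != 0), and clear the
 * pending bit if the list becomes empty. Returns NULL if nothing matched.
 */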
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
                                                  int isc, u32 schid)
{
        struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
        struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
        struct kvm_s390_interrupt_info *iter;
        u16 id = (schid & 0xffff0000U) >> 16;
        u16 nr = schid & 0x0000ffffU;

        spin_lock(&fi->lock);
        list_for_each_entry(iter, isc_list, list) {
                if (schid && (id != iter->io.subchannel_id ||
                              nr != iter->io.subchannel_nr))
                        continue;
                /* found an appropriate entry */
                list_del_init(&iter->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
                if (list_empty(isc_list))
                        clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
                spin_unlock(&fi->lock);
                return iter;
        }
        spin_unlock(&fi->lock);
        return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 isc_mask, u32 schid)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        int isc;

        for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
                if (isc_mask & isc_to_isc_bits(isc))
                        inti = get_io_int(kvm, isc, schid);
        }
        return inti;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

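/*
 * Queue a floating service (SCLP) interrupt. Event-pending bits are
 * accumulated; a new SCCB address is only accepted once the previously
 * stored one has been delivered.
 */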
static int __inject_service(struct kvm *kvm,
                             struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

        spin_lock(&fi->lock);
        fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
        /*
         * Early versions of the QEMU s390 bios will inject several
         * service interrupts one after another without handling a
         * condition code indicating busy.
         * We will silently ignore those superfluous sccb values.
         * A future version of QEMU will take care of serialization
         * of servc requests.
         */
1338         if (fi->srv_signal.ext_params & SCCB_MASK)
1339                 goto out;
1340         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1341         set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1342 out:
1343         spin_unlock(&fi->lock);
1344         kfree(inti);
1345         return 0;
1346 }
1347
1348 static int __inject_virtio(struct kvm *kvm,
1349                             struct kvm_s390_interrupt_info *inti)
1350 {
1351         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1352
1353         spin_lock(&fi->lock);
1354         if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1355                 spin_unlock(&fi->lock);
1356                 return -EBUSY;
1357         }
1358         fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1359         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1360         set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1361         spin_unlock(&fi->lock);
1362         return 0;
1363 }
1364
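     /*
      * Queue a "pfault done" completion interrupt, bounded by the maximum
      * number of async page faults that can be outstanding system-wide.
      */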
1365 static int __inject_pfault_done(struct kvm *kvm,
1366                                  struct kvm_s390_interrupt_info *inti)
1367 {
1368         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1369
1370         spin_lock(&fi->lock);
1371         if (fi->counters[FIRQ_CNTR_PFAULT] >=
1372                 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1373                 spin_unlock(&fi->lock);
1374                 return -EBUSY;
1375         }
1376         fi->counters[FIRQ_CNTR_PFAULT] += 1;
1377         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1378         set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1379         spin_unlock(&fi->lock);
1380         return 0;
1381 }
1382
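     /*
      * Repressible machine checks are folded into one pending condition:
      * only the CR_PENDING_SUBCLASS bit of cr14 is accumulated and the
      * mcic bits are ORed together, so a single IRQ_PEND_MCHK_REP
      * delivery can report all of them.
      */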
1383 #define CR_PENDING_SUBCLASS 28
1384 static int __inject_float_mchk(struct kvm *kvm,
1385                                 struct kvm_s390_interrupt_info *inti)
1386 {
1387         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1388
1389         spin_lock(&fi->lock);
1390         fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1391         fi->mchk.mcic |= inti->mchk.mcic;
1392         set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1393         spin_unlock(&fi->lock);
1394         kfree(inti);
1395         return 0;
1396 }
1397
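     /*
      * Queue a floating I/O interrupt on the list belonging to its
      * interruption subclass, which is derived from the interruption word.
      */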
1398 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1399 {
1400         struct kvm_s390_float_interrupt *fi;
1401         struct list_head *list;
1402         int isc;
1403
1404         fi = &kvm->arch.float_int;
1405         spin_lock(&fi->lock);
1406         if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1407                 spin_unlock(&fi->lock);
1408                 return -EBUSY;
1409         }
1410         fi->counters[FIRQ_CNTR_IO] += 1;
1411
1412         isc = int_word_to_isc(inti->io.io_int_word);
1413         list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1414         list_add_tail(&inti->list, list);
1415         set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
1416         spin_unlock(&fi->lock);
1417         return 0;
1418 }
1419
1420 /*
1421  * Find a destination VCPU for a floating irq and kick it.
1422  */
1423 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1424 {
1425         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1426         struct kvm_s390_local_interrupt *li;
1427         struct kvm_vcpu *dst_vcpu;
1428         int sigcpu, online_vcpus, nr_tries = 0;
1429
1430         online_vcpus = atomic_read(&kvm->online_vcpus);
1431         if (!online_vcpus)
1432                 return;
1433
1434         /* find idle VCPUs first, then round robin */
1435         sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1436         if (sigcpu == online_vcpus) {
1437                 do {
1438                         sigcpu = fi->next_rr_cpu;
1439                         fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1440                         /* avoid endless loops if all vcpus are stopped */
1441                         if (nr_tries++ >= online_vcpus)
1442                                 return;
1443                 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1444         }
1445         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1446
1447         /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1448         li = &dst_vcpu->arch.local_int;
1449         spin_lock(&li->lock);
1450         switch (type) {
1451         case KVM_S390_MCHK:
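                     /*
                      * No cpuflag exists to indicate pending machine
                      * checks to the SIE; the stop indication is used
                      * instead to make the VCPU leave the SIE so the
                      * machine check can be delivered.
                      */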
1452                 atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
1453                 break;
1454         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1455                 atomic_or(CPUSTAT_IO_INT, li->cpuflags);
1456                 break;
1457         default:
1458                 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1459                 break;
1460         }
1461         spin_unlock(&li->lock);
1462         kvm_s390_vcpu_wakeup(dst_vcpu);
1463 }
1464
1465 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1466 {
1467         u64 type = READ_ONCE(inti->type);
1468         int rc;
1469
1470         switch (type) {
1471         case KVM_S390_MCHK:
1472                 rc = __inject_float_mchk(kvm, inti);
1473                 break;
1474         case KVM_S390_INT_VIRTIO:
1475                 rc = __inject_virtio(kvm, inti);
1476                 break;
1477         case KVM_S390_INT_SERVICE:
1478                 rc = __inject_service(kvm, inti);
1479                 break;
1480         case KVM_S390_INT_PFAULT_DONE:
1481                 rc = __inject_pfault_done(kvm, inti);
1482                 break;
1483         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1484                 rc = __inject_io(kvm, inti);
1485                 break;
1486         default:
1487                 rc = -EINVAL;
1488         }
1489         if (rc)
1490                 return rc;
1491
1492         __floating_irq_kick(kvm, type);
1493         return 0;
1494 }
1495
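     /*
      * Inject a floating interrupt described by the (legacy) userspace
      * format: translate it into a kvm_s390_interrupt_info, queue it and
      * kick a destination VCPU.  The allocation is freed on error;
      * otherwise ownership moves to the floating interrupt lists.
      */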
1496 int kvm_s390_inject_vm(struct kvm *kvm,
1497                        struct kvm_s390_interrupt *s390int)
1498 {
1499         struct kvm_s390_interrupt_info *inti;
1500         int rc;
1501
1502         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1503         if (!inti)
1504                 return -ENOMEM;
1505
1506         inti->type = s390int->type;
1507         switch (inti->type) {
1508         case KVM_S390_INT_VIRTIO:
1509                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1510                          s390int->parm, s390int->parm64);
1511                 inti->ext.ext_params = s390int->parm;
1512                 inti->ext.ext_params2 = s390int->parm64;
1513                 break;
1514         case KVM_S390_INT_SERVICE:
1515                 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1516                 inti->ext.ext_params = s390int->parm;
1517                 break;
1518         case KVM_S390_INT_PFAULT_DONE:
1519                 inti->ext.ext_params2 = s390int->parm64;
1520                 break;
1521         case KVM_S390_MCHK:
1522                 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
1523                          s390int->parm64);
1524                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1525                 inti->mchk.mcic = s390int->parm64;
1526                 break;
1527         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1528                 if (inti->type & KVM_S390_INT_IO_AI_MASK)
1529                         VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1530                 else
1531                         VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1532                                  s390int->type & IOINT_CSSID_MASK,
1533                                  s390int->type & IOINT_SSID_MASK,
1534                                  s390int->type & IOINT_SCHID_MASK);
1535                 inti->io.subchannel_id = s390int->parm >> 16;
1536                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1537                 inti->io.io_int_parm = s390int->parm64 >> 32;
1538                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1539                 break;
1540         default:
1541                 kfree(inti);
1542                 return -EINVAL;
1543         }
1544         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1545                                  2);
1546
1547         rc = __inject_vm(kvm, inti);
1548         if (rc)
1549                 kfree(inti);
1550         return rc;
1551 }
1552
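     /*
      * Put back an I/O interrupt that was dequeued with
      * kvm_s390_get_io_int() but could not be delivered after all.
      */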
1553 int kvm_s390_reinject_io_int(struct kvm *kvm,
1554                               struct kvm_s390_interrupt_info *inti)
1555 {
1556         return __inject_vm(kvm, inti);
1557 }
1558
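     /*
      * Convert the legacy kvm_s390_interrupt format into a kvm_s390_irq,
      * copying the payload that is valid for the given type.  The 16-bit
      * interruption codes (program, external call, emergency) are
      * validated.
      */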
1559 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1560                        struct kvm_s390_irq *irq)
1561 {
1562         irq->type = s390int->type;
1563         switch (irq->type) {
1564         case KVM_S390_PROGRAM_INT:
1565                 if (s390int->parm & 0xffff0000)
1566                         return -EINVAL;
1567                 irq->u.pgm.code = s390int->parm;
1568                 break;
1569         case KVM_S390_SIGP_SET_PREFIX:
1570                 irq->u.prefix.address = s390int->parm;
1571                 break;
1572         case KVM_S390_SIGP_STOP:
1573                 irq->u.stop.flags = s390int->parm;
1574                 break;
1575         case KVM_S390_INT_EXTERNAL_CALL:
1576                 if (s390int->parm & 0xffff0000)
1577                         return -EINVAL;
1578                 irq->u.extcall.code = s390int->parm;
1579                 break;
1580         case KVM_S390_INT_EMERGENCY:
1581                 if (s390int->parm & 0xffff0000)
1582                         return -EINVAL;
1583                 irq->u.emerg.code = s390int->parm;
1584                 break;
1585         case KVM_S390_MCHK:
1586                 irq->u.mchk.mcic = s390int->parm64;
1587                 break;
1588         }
1589         return 0;
1590 }
1591
1592 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1593 {
1594         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1595
1596         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1597 }
1598
1599 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1600 {
1601         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1602
1603         spin_lock(&li->lock);
1604         li->irq.stop.flags = 0;
1605         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1606         spin_unlock(&li->lock);
1607 }
1608
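     /* Must be called with the local interrupt lock (li->lock) held. */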
1609 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1610 {
1611         int rc;
1612
1613         switch (irq->type) {
1614         case KVM_S390_PROGRAM_INT:
1615                 rc = __inject_prog(vcpu, irq);
1616                 break;
1617         case KVM_S390_SIGP_SET_PREFIX:
1618                 rc = __inject_set_prefix(vcpu, irq);
1619                 break;
1620         case KVM_S390_SIGP_STOP:
1621                 rc = __inject_sigp_stop(vcpu, irq);
1622                 break;
1623         case KVM_S390_RESTART:
1624                 rc = __inject_sigp_restart(vcpu, irq);
1625                 break;
1626         case KVM_S390_INT_CLOCK_COMP:
1627                 rc = __inject_ckc(vcpu);
1628                 break;
1629         case KVM_S390_INT_CPU_TIMER:
1630                 rc = __inject_cpu_timer(vcpu);
1631                 break;
1632         case KVM_S390_INT_EXTERNAL_CALL:
1633                 rc = __inject_extcall(vcpu, irq);
1634                 break;
1635         case KVM_S390_INT_EMERGENCY:
1636                 rc = __inject_sigp_emergency(vcpu, irq);
1637                 break;
1638         case KVM_S390_MCHK:
1639                 rc = __inject_mchk(vcpu, irq);
1640                 break;
1641         case KVM_S390_INT_PFAULT_INIT:
1642                 rc = __inject_pfault_init(vcpu, irq);
1643                 break;
1644         case KVM_S390_INT_VIRTIO:
1645         case KVM_S390_INT_SERVICE:
1646         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1647         default:
1648                 rc = -EINVAL;
1649         }
1650
1651         return rc;
1652 }
1653
1654 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1655 {
1656         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1657         int rc;
1658
1659         spin_lock(&li->lock);
1660         rc = do_inject_vcpu(vcpu, irq);
1661         spin_unlock(&li->lock);
1662         if (!rc)
1663                 kvm_s390_vcpu_wakeup(vcpu);
1664         return rc;
1665 }
1666
1667 static inline void clear_irq_list(struct list_head *_list)
1668 {
1669         struct kvm_s390_interrupt_info *inti, *n;
1670
1671         list_for_each_entry_safe(inti, n, _list, list) {
1672                 list_del(&inti->list);
1673                 kfree(inti);
1674         }
1675 }
1676
1677 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1678                        struct kvm_s390_irq *irq)
1679 {
1680         irq->type = inti->type;
1681         switch (inti->type) {
1682         case KVM_S390_INT_PFAULT_INIT:
1683         case KVM_S390_INT_PFAULT_DONE:
1684         case KVM_S390_INT_VIRTIO:
1685                 irq->u.ext = inti->ext;
1686                 break;
1687         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1688                 irq->u.io = inti->io;
1689                 break;
1690         }
1691 }
1692
1693 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1694 {
1695         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1696         int i;
1697
1698         spin_lock(&fi->lock);
1699         fi->pending_irqs = 0;
1700         memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1701         memset(&fi->mchk, 0, sizeof(fi->mchk));
1702         for (i = 0; i < FIRQ_LIST_COUNT; i++)
1703                 clear_irq_list(&fi->lists[i]);
1704         for (i = 0; i < FIRQ_MAX_COUNT; i++)
1705                 fi->counters[i] = 0;
1706         spin_unlock(&fi->lock);
1707 }
1708
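     /*
      * Copy all pending floating interrupts into a userspace buffer.
      * Returns the number of interrupts copied, -ENOMEM if the buffer is
      * too small (userspace may retry with a bigger one), or another
      * negative error code.
      */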
1709 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
1710 {
1711         struct kvm_s390_interrupt_info *inti;
1712         struct kvm_s390_float_interrupt *fi;
1713         struct kvm_s390_irq *buf;
1714         struct kvm_s390_irq *irq;
1715         int max_irqs;
1716         int ret = 0;
1717         int n = 0;
1718         int i;
1719
1720         if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1721                 return -EINVAL;
1722
1723         /*
1724          * We are already using -ENOMEM to signal to userspace that it
1725          * may retry with a bigger buffer, so we need to use something
1726          * else for this case.
1727          */
1728         buf = vzalloc(len);
1729         if (!buf)
1730                 return -ENOBUFS;
1731
1732         max_irqs = len / sizeof(struct kvm_s390_irq);
1733
1734         fi = &kvm->arch.float_int;
1735         spin_lock(&fi->lock);
1736         for (i = 0; i < FIRQ_LIST_COUNT; i++) {
1737                 list_for_each_entry(inti, &fi->lists[i], list) {
1738                         if (n == max_irqs) {
1739                                 /* signal userspace to try again */
1740                                 ret = -ENOMEM;
1741                                 goto out;
1742                         }
1743                         inti_to_irq(inti, &buf[n]);
1744                         n++;
1745                 }
1746         }
1747         if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
1748                 if (n == max_irqs) {
1749                         /* signal userspace to try again */
1750                         ret = -ENOMEM;
1751                         goto out;
1752                 }
1753                 irq = (struct kvm_s390_irq *) &buf[n];
1754                 irq->type = KVM_S390_INT_SERVICE;
1755                 irq->u.ext = fi->srv_signal;
1756                 n++;
1757         }
1758         if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
1759                 if (n == max_irqs) {
1760                         /* signal userspace to try again */
1761                         ret = -ENOMEM;
1762                         goto out;
1763                 }
1764                 irq = (struct kvm_s390_irq *) &buf[n];
1765                 irq->type = KVM_S390_MCHK;
1766                 irq->u.mchk = fi->mchk;
1767                 n++;
1768         }
1769
1770 out:
1771         spin_unlock(&fi->lock);
1772         if (!ret && n > 0) {
1773                 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
1774                         ret = -EFAULT;
1775         }
1776         vfree(buf);
1777
1778         return ret < 0 ? ret : n;
1779 }
1780
1781 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1782 {
1783         int r;
1784
1785         switch (attr->group) {
1786         case KVM_DEV_FLIC_GET_ALL_IRQS:
1787                 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
1788                                           attr->attr);
1789                 break;
1790         default:
1791                 r = -EINVAL;
1792         }
1793
1794         return r;
1795 }
1796
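     /*
      * Fetch one kvm_s390_irq from userspace: read the type first, then
      * copy only the union member that is valid for that type.
      */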
1797 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1798                                      u64 addr)
1799 {
1800         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1801         void *target = NULL;
1802         void __user *source;
1803         u64 size;
1804
1805         if (get_user(inti->type, (u64 __user *)addr))
1806                 return -EFAULT;
1807
1808         switch (inti->type) {
1809         case KVM_S390_INT_PFAULT_INIT:
1810         case KVM_S390_INT_PFAULT_DONE:
1811         case KVM_S390_INT_VIRTIO:
1812         case KVM_S390_INT_SERVICE:
1813                 target = (void *) &inti->ext;
1814                 source = &uptr->u.ext;
1815                 size = sizeof(inti->ext);
1816                 break;
1817         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1818                 target = (void *) &inti->io;
1819                 source = &uptr->u.io;
1820                 size = sizeof(inti->io);
1821                 break;
1822         case KVM_S390_MCHK:
1823                 target = (void *) &inti->mchk;
1824                 source = &uptr->u.mchk;
1825                 size = sizeof(inti->mchk);
1826                 break;
1827         default:
1828                 return -EINVAL;
1829         }
1830
1831         if (copy_from_user(target, source, size))
1832                 return -EFAULT;
1833
1834         return 0;
1835 }
1836
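     /*
      * Inject an array of kvm_s390_irq from userspace as floating
      * interrupts, one at a time; the first failure stops processing,
      * while interrupts injected so far stay pending.
      */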
1837 static int enqueue_floating_irq(struct kvm_device *dev,
1838                                 struct kvm_device_attr *attr)
1839 {
1840         struct kvm_s390_interrupt_info *inti = NULL;
1841         int r = 0;
1842         int len = attr->attr;
1843
1844         if (len % sizeof(struct kvm_s390_irq) != 0)
1845                 return -EINVAL;
1846         else if (len > KVM_S390_FLIC_MAX_BUFFER)
1847                 return -EINVAL;
1848
1849         while (len >= sizeof(struct kvm_s390_irq)) {
1850                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1851                 if (!inti)
1852                         return -ENOMEM;
1853
1854                 r = copy_irq_from_user(inti, attr->addr);
1855                 if (r) {
1856                         kfree(inti);
1857                         return r;
1858                 }
1859                 r = __inject_vm(dev->kvm, inti);
1860                 if (r) {
1861                         kfree(inti);
1862                         return r;
1863                 }
1864                 len -= sizeof(struct kvm_s390_irq);
1865                 attr->addr += sizeof(struct kvm_s390_irq);
1866         }
1867
1868         return r;
1869 }
1870
1871 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1872 {
1873         if (id >= MAX_S390_IO_ADAPTERS)
1874                 return NULL;
1875         return kvm->arch.adapters[id];
1876 }
1877
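     /*
      * Register an I/O adapter for adapter interrupts.  The adapter
      * description carries the ISC to inject on and whether indicator
      * bits are addressed with swapped bit numbering (see get_ind_bit()).
      */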
1878 static int register_io_adapter(struct kvm_device *dev,
1879                                struct kvm_device_attr *attr)
1880 {
1881         struct s390_io_adapter *adapter;
1882         struct kvm_s390_io_adapter adapter_info;
1883
1884         if (copy_from_user(&adapter_info,
1885                            (void __user *)attr->addr, sizeof(adapter_info)))
1886                 return -EFAULT;
1887
1888         if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1889             (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1890                 return -EINVAL;
1891
1892         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1893         if (!adapter)
1894                 return -ENOMEM;
1895
1896         INIT_LIST_HEAD(&adapter->maps);
1897         init_rwsem(&adapter->maps_lock);
1898         atomic_set(&adapter->nr_maps, 0);
1899         adapter->id = adapter_info.id;
1900         adapter->isc = adapter_info.isc;
1901         adapter->maskable = adapter_info.maskable;
1902         adapter->masked = false;
1903         adapter->swap = adapter_info.swap;
1904         dev->kvm->arch.adapters[adapter->id] = adapter;
1905
1906         return 0;
1907 }
1908
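     /*
      * Mask or unmask an adapter.  Returns the previous masked state
      * (0 or 1), or -EINVAL if the adapter does not exist or is not
      * maskable.
      */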
1909 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1910 {
1911         int ret;
1912         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1913
1914         if (!adapter || !adapter->maskable)
1915                 return -EINVAL;
1916         ret = adapter->masked;
1917         adapter->masked = masked;
1918         return ret;
1919 }
1920
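     /*
      * Pin the guest page containing the given guest address and remember
      * the mapping, so that indicator bits can later be set directly in
      * the host-side mapping.  At most MAX_S390_ADAPTER_MAPS pages may be
      * mapped per adapter.
      */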
1921 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1922 {
1923         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1924         struct s390_map_info *map;
1925         int ret;
1926
1927         if (!adapter || !addr)
1928                 return -EINVAL;
1929
1930         map = kzalloc(sizeof(*map), GFP_KERNEL);
1931         if (!map) {
1932                 ret = -ENOMEM;
1933                 goto out;
1934         }
1935         INIT_LIST_HEAD(&map->list);
1936         map->guest_addr = addr;
1937         map->addr = gmap_translate(kvm->arch.gmap, addr);
1938         if (map->addr == -EFAULT) {
1939                 ret = -EFAULT;
1940                 goto out;
1941         }
1942         ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1943         if (ret < 0)
1944                 goto out;
1945         BUG_ON(ret != 1);
1946         down_write(&adapter->maps_lock);
1947         if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1948                 list_add_tail(&map->list, &adapter->maps);
1949                 ret = 0;
1950         } else {
1951                 put_page(map->page);
1952                 ret = -EINVAL;
1953         }
1954         up_write(&adapter->maps_lock);
1955 out:
1956         if (ret)
1957                 kfree(map);
1958         return ret;
1959 }
1960
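     /*
      * Undo a kvm_s390_adapter_map(): drop the mapping for the given
      * guest address and unpin its page.
      */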
1961 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1962 {
1963         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1964         struct s390_map_info *map, *tmp;
1965         int found = 0;
1966
1967         if (!adapter || !addr)
1968                 return -EINVAL;
1969
1970         down_write(&adapter->maps_lock);
1971         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1972                 if (map->guest_addr == addr) {
1973                         found = 1;
1974                         atomic_dec(&adapter->nr_maps);
1975                         list_del(&map->list);
1976                         put_page(map->page);
1977                         kfree(map);
1978                         break;
1979                 }
1980         }
1981         up_write(&adapter->maps_lock);
1982
1983         return found ? 0 : -EINVAL;
1984 }
1985
1986 void kvm_s390_destroy_adapters(struct kvm *kvm)
1987 {
1988         int i;
1989         struct s390_map_info *map, *tmp;
1990
1991         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1992                 if (!kvm->arch.adapters[i])
1993                         continue;
1994                 list_for_each_entry_safe(map, tmp,
1995                                          &kvm->arch.adapters[i]->maps, list) {
1996                         list_del(&map->list);
1997                         put_page(map->page);
1998                         kfree(map);
1999                 }
2000                 kfree(kvm->arch.adapters[i]);
2001         }
2002 }
2003
2004 static int modify_io_adapter(struct kvm_device *dev,
2005                              struct kvm_device_attr *attr)
2006 {
2007         struct kvm_s390_io_adapter_req req;
2008         struct s390_io_adapter *adapter;
2009         int ret;
2010
2011         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2012                 return -EFAULT;
2013
2014         adapter = get_io_adapter(dev->kvm, req.id);
2015         if (!adapter)
2016                 return -EINVAL;
2017         switch (req.type) {
2018         case KVM_S390_IO_ADAPTER_MASK:
2019                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2020                 if (ret > 0)
2021                         ret = 0;
2022                 break;
2023         case KVM_S390_IO_ADAPTER_MAP:
2024                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2025                 break;
2026         case KVM_S390_IO_ADAPTER_UNMAP:
2027                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2028                 break;
2029         default:
2030                 ret = -EINVAL;
2031         }
2032
2033         return ret;
2034 }
2035
2036 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2037 {
2038         int r = 0;
2039         unsigned int i;
2040         struct kvm_vcpu *vcpu;
2041
2042         switch (attr->group) {
2043         case KVM_DEV_FLIC_ENQUEUE:
2044                 r = enqueue_floating_irq(dev, attr);
2045                 break;
2046         case KVM_DEV_FLIC_CLEAR_IRQS:
2047                 kvm_s390_clear_float_irqs(dev->kvm);
2048                 break;
2049         case KVM_DEV_FLIC_APF_ENABLE:
2050                 dev->kvm->arch.gmap->pfault_enabled = 1;
2051                 break;
2052         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2053                 dev->kvm->arch.gmap->pfault_enabled = 0;
2054                 /*
2055                  * Make sure no async faults are in transition when
2056                  * clearing the queues, so that we do not need to worry
2057                  * about late-arriving workers.
2058                  */
2059                 synchronize_srcu(&dev->kvm->srcu);
2060                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2061                         kvm_clear_async_pf_completion_queue(vcpu);
2062                 break;
2063         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2064                 r = register_io_adapter(dev, attr);
2065                 break;
2066         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2067                 r = modify_io_adapter(dev, attr);
2068                 break;
2069         default:
2070                 r = -EINVAL;
2071         }
2072
2073         return r;
2074 }
2075
2076 static int flic_create(struct kvm_device *dev, u32 type)
2077 {
2078         if (!dev)
2079                 return -EINVAL;
2080         if (dev->kvm->arch.flic)
2081                 return -EINVAL;
2082         dev->kvm->arch.flic = dev;
2083         return 0;
2084 }
2085
2086 static void flic_destroy(struct kvm_device *dev)
2087 {
2088         dev->kvm->arch.flic = NULL;
2089         kfree(dev);
2090 }
2091
2092 /* s390 floating irq controller (flic) */
2093 struct kvm_device_ops kvm_flic_ops = {
2094         .name = "kvm-flic",
2095         .get_attr = flic_get_attr,
2096         .set_attr = flic_set_attr,
2097         .create = flic_create,
2098         .destroy = flic_destroy,
2099 };
2100
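     /*
      * Compute the number of an indicator bit within its page.  With swap,
      * the bit number is mirrored inside each 64-bit word
      * (bit ^ (BITS_PER_LONG - 1)), translating between MSB-first guest
      * numbering and the LSB-first numbering of set_bit(); e.g. bit 0
      * becomes bit 63 of the same word.
      */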
2101 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2102 {
2103         unsigned long bit;
2104
2105         bit = bit_nr + (addr % PAGE_SIZE) * 8;
2106
2107         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2108 }
2109
2110 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2111                                           u64 addr)
2112 {
2113         struct s390_map_info *map;
2114
2115         if (!adapter)
2116                 return NULL;
2117
2118         list_for_each_entry(map, &adapter->maps, list) {
2119                 if (map->guest_addr == addr)
2120                         return map;
2121         }
2122         return NULL;
2123 }
2124
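     /*
      * Set the adapter-local indicator bit and the corresponding summary
      * indicator in the pinned guest pages.  Returns 1 if the summary bit
      * was newly set (an interrupt needs to be injected), 0 if it was
      * already set (the interrupt is coalesced), and -1 if a mapping is
      * missing.
      */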
2125 static int adapter_indicators_set(struct kvm *kvm,
2126                                   struct s390_io_adapter *adapter,
2127                                   struct kvm_s390_adapter_int *adapter_int)
2128 {
2129         unsigned long bit;
2130         int summary_set, idx;
2131         struct s390_map_info *info;
2132         void *map;
2133
2134         info = get_map_info(adapter, adapter_int->ind_addr);
2135         if (!info)
2136                 return -1;
2137         map = page_address(info->page);
2138         bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2139         set_bit(bit, map);
2140         idx = srcu_read_lock(&kvm->srcu);
2141         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2142         set_page_dirty_lock(info->page);
2143         info = get_map_info(adapter, adapter_int->summary_addr);
2144         if (!info) {
2145                 srcu_read_unlock(&kvm->srcu, idx);
2146                 return -1;
2147         }
2148         map = page_address(info->page);
2149         bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2150                           adapter->swap);
2151         summary_set = test_and_set_bit(bit, map);
2152         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2153         set_page_dirty_lock(info->page);
2154         srcu_read_unlock(&kvm->srcu, idx);
2155         return summary_set ? 0 : 1;
2156 }
2157
2158 /*
2159  * < 0 - not injected due to error
2160  * = 0 - coalesced, summary indicator already active
2161  * > 0 - injected interrupt
2162  */
2163 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2164                            struct kvm *kvm, int irq_source_id, int level,
2165                            bool line_status)
2166 {
2167         int ret;
2168         struct s390_io_adapter *adapter;
2169
2170         /* We're only interested in the 0->1 transition. */
2171         if (!level)
2172                 return 0;
2173         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2174         if (!adapter)
2175                 return -1;
2176         down_read(&adapter->maps_lock);
2177         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2178         up_read(&adapter->maps_lock);
2179         if ((ret > 0) && !adapter->masked) {
2180                 struct kvm_s390_interrupt s390int = {
2181                         .type = KVM_S390_INT_IO(1, 0, 0, 0),
2182                         .parm = 0,
2183                         .parm64 = (adapter->isc << 27) | 0x80000000,
2184                 };
2185                 ret = kvm_s390_inject_vm(kvm, &s390int);
2186                 if (ret == 0)
2187                         ret = 1;
2188         }
2189         return ret;
2190 }
2191
2192 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
2193                           const struct kvm_irq_routing_entry *ue)
2194 {
2195         int ret;
2196
2197         switch (ue->type) {
2198         case KVM_IRQ_ROUTING_S390_ADAPTER:
2199                 e->set = set_adapter_int;
2200                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2201                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2202                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2203                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2204                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2205                 ret = 0;
2206                 break;
2207         default:
2208                 ret = -EINVAL;
2209         }
2210
2211         return ret;
2212 }
2213
2214 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2215                 int irq_source_id, int level, bool line_status)
2216 {
2217         return -EINVAL;
2218 }
2219
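     /*
      * Restore a VCPU's local interrupt state from a userspace buffer of
      * kvm_s390_irq entries, the counterpart of kvm_s390_get_irq_state().
      * Fails with -EBUSY if any local interrupt is already pending.
      */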
2220 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2221 {
2222         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2223         struct kvm_s390_irq *buf;
2224         int r = 0;
2225         int n;
2226
2227         buf = vmalloc(len);
2228         if (!buf)
2229                 return -ENOMEM;
2230
2231         if (copy_from_user((void *) buf, irqstate, len)) {
2232                 r = -EFAULT;
2233                 goto out_free;
2234         }
2235
2236         /*
2237          * Don't allow setting the interrupt state
2238          * when there are already interrupts pending
2239          */
2240         spin_lock(&li->lock);
2241         if (li->pending_irqs) {
2242                 r = -EBUSY;
2243                 goto out_unlock;
2244         }
2245
2246         for (n = 0; n < len / sizeof(*buf); n++) {
2247                 r = do_inject_vcpu(vcpu, &buf[n]);
2248                 if (r)
2249                         break;
2250         }
2251
2252 out_unlock:
2253         spin_unlock(&li->lock);
2254 out_free:
2255         vfree(buf);
2256
2257         return r;
2258 }
2259
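     /* Translate one pending local interrupt into the uapi kvm_s390_irq layout. */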
2260 static void store_local_irq(struct kvm_s390_local_interrupt *li,
2261                             struct kvm_s390_irq *irq,
2262                             unsigned long irq_type)
2263 {
2264         switch (irq_type) {
2265         case IRQ_PEND_MCHK_EX:
2266         case IRQ_PEND_MCHK_REP:
2267                 irq->type = KVM_S390_MCHK;
2268                 irq->u.mchk = li->irq.mchk;
2269                 break;
2270         case IRQ_PEND_PROG:
2271                 irq->type = KVM_S390_PROGRAM_INT;
2272                 irq->u.pgm = li->irq.pgm;
2273                 break;
2274         case IRQ_PEND_PFAULT_INIT:
2275                 irq->type = KVM_S390_INT_PFAULT_INIT;
2276                 irq->u.ext = li->irq.ext;
2277                 break;
2278         case IRQ_PEND_EXT_EXTERNAL:
2279                 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2280                 irq->u.extcall = li->irq.extcall;
2281                 break;
2282         case IRQ_PEND_EXT_CLOCK_COMP:
2283                 irq->type = KVM_S390_INT_CLOCK_COMP;
2284                 break;
2285         case IRQ_PEND_EXT_CPU_TIMER:
2286                 irq->type = KVM_S390_INT_CPU_TIMER;
2287                 break;
2288         case IRQ_PEND_SIGP_STOP:
2289                 irq->type = KVM_S390_SIGP_STOP;
2290                 irq->u.stop = li->irq.stop;
2291                 break;
2292         case IRQ_PEND_RESTART:
2293                 irq->type = KVM_S390_RESTART;
2294                 break;
2295         case IRQ_PEND_SET_PREFIX:
2296                 irq->type = KVM_S390_SIGP_SET_PREFIX;
2297                 irq->u.prefix = li->irq.prefix;
2298                 break;
2299         }
2300 }
2301
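     /*
      * Snapshot a VCPU's local interrupt state into a userspace buffer:
      * one kvm_s390_irq per pending interrupt, one entry per pending SIGP
      * emergency source, plus an external call pending in the SCA.
      * Returns the number of bytes written or a negative error code.
      */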
2302 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2303 {
2304         int scn;
2305         unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2306         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2307         unsigned long pending_irqs;
2308         struct kvm_s390_irq irq;
2309         unsigned long irq_type;
2310         int cpuaddr;
2311         int n = 0;
2312
2313         spin_lock(&li->lock);
2314         pending_irqs = li->pending_irqs;
2315         memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2316                sizeof(sigp_emerg_pending));
2317         spin_unlock(&li->lock);
2318
2319         for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2320                 memset(&irq, 0, sizeof(irq));
2321                 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2322                         continue;
2323                 if (n + sizeof(irq) > len)
2324                         return -ENOBUFS;
2325                 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2326                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2327                         return -EFAULT;
2328                 n += sizeof(irq);
2329         }
2330
2331         if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2332                 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2333                         memset(&irq, 0, sizeof(irq));
2334                         if (n + sizeof(irq) > len)
2335                                 return -ENOBUFS;
2336                         irq.type = KVM_S390_INT_EMERGENCY;
2337                         irq.u.emerg.code = cpuaddr;
2338                         if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2339                                 return -EFAULT;
2340                         n += sizeof(irq);
2341                 }
2342         }
2343
2344         if (sca_ext_call_pending(vcpu, &scn)) {
2345                 if (n + sizeof(irq) > len)
2346                         return -ENOBUFS;
2347                 memset(&irq, 0, sizeof(irq));
2348                 irq.type = KVM_S390_INT_EXTERNAL_CALL;
2349                 irq.u.extcall.code = scn;
2350                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2351                         return -EFAULT;
2352                 n += sizeof(irq);
2353         }
2354
2355         return n;
2356 }