// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward)
};
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");
/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16
/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
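
/*
 * Probe a single PERFORM LOCKED OPERATION function code on the host.
 * Setting bit 0x100 selects the "test bit" form, so the subfunction is
 * only queried, not executed; the condition code tells whether it is
 * installed.
 */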
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc;
}
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}
#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939
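
/*
 * Probe the host for all CPU (sub)functions and SIE facilities that KVM
 * can offer to guests: PLO and PTFF subfunctions, the CPACF crypto
 * subfunctions guarded by their MSA facility bits, SORTL/DFLTCC, and the
 * SIE model features reported via SCLP.
 */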
static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}
static void __kvm_s390_exit(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}
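
/*
 * Sync the KVM dirty log with the gmap segment-level dirty bits, one
 * segment (_PAGE_ENTRIES pages) at a time. Bails out on a fatal signal
 * and reschedules between segments.
 */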
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
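
/*
 * Request interception of operation exceptions on all VCPUs, used when
 * user space wants to handle instruction 0 itself.
 */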
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
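
/*
 * Re-initialize the crypto control blocks of all VCPUs and kick them out
 * of the VSIE handler so that the shadow crycbs are rebuilt.
 */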
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}
static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If host is configured for PCI and the necessary facilities are
	 * available, turn on interpretation for the life of this guest
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}
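
/* Make a synchronous request on every VCPU of the VM. */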
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}
/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}
/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}
static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}
static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
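
/*
 * Set the guest TOD clock including the epoch index (multiple-epoch
 * facility). A non-zero epoch index is only accepted if facility 139 is
 * enabled for the guest.
 */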
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u32 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}
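
/*
 * Read the current host TOD clock and convert it to the guest view by
 * applying the VM's epoch (and epoch index, if the multiple-epoch
 * facility is available), taking the carry between the two into account.
 */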
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}
static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u32 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}
static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}
/**
 * kvm_s390_update_topology_change_report - update CPU topology change report
 * @kvm: guest KVM description
 * @val: set or clear the MTCR bit
 *
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 * the guest with a topology change.
 * This is only relevant if the topology facility is present.
 *
 * The SCA version, bsca or esca, doesn't matter as offset is the same.
 */
static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
{
	union sca_utility new, old;
	struct bsca_block *sca;

	read_lock(&kvm->arch.sca_lock);
	sca = kvm->arch.sca;
	do {
		old = READ_ONCE(sca->utility);
		new = old;
		new.mtcr = val;
	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
}
static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
					       struct kvm_device_attr *attr)
{
	if (!test_kvm_facility(kvm, 11))
		return -ENXIO;

	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
	return 0;
}
static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
					       struct kvm_device_attr *attr)
{
	u8 topo;

	if (!test_kvm_facility(kvm, 11))
		return -ENXIO;

	read_lock(&kvm->arch.sca_lock);
	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
	read_unlock(&kvm->arch.sca_lock);

	return put_user(topo, (u8 __user *)attr->addr);
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_set_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_get_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
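
/*
 * Read guest storage keys into a user buffer. Returns
 * KVM_S390_GET_SKEYS_NONE if the guest is not using storage keys.
 */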
static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
2041 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2045 int srcu_idx, i, r = 0;
2048 if (args->flags != 0)
2051 /* Enforce sane limit on memory allocation */
2052 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2055 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2059 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2060 sizeof(uint8_t) * args->count);
2066 /* Enable storage key handling for the guest */
2067 r = s390_enable_skey();
2072 mmap_read_lock(current->mm);
2073 srcu_idx = srcu_read_lock(&kvm->srcu);
2074 while (i < args->count) {
2076 hva = gfn_to_hva(kvm, args->start_gfn + i);
2077 if (kvm_is_error_hva(hva)) {
2082 /* Lowest order bit is reserved */
2083 if (keys[i] & 0x01) {
2088 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2090 r = fixup_user_fault(current->mm, hva,
2091 FAULT_FLAG_WRITE, &unlocked);
2098 srcu_read_unlock(&kvm->srcu, srcu_idx);
2099 mmap_read_unlock(current->mm);
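/*
 * For reference, and assuming the usual s390 storage key layout (the
 * names below are illustrative, not kernel definitions): a key byte
 * consists of ACC (4 bits) | F | R | C | reserved, i.e.
 *
 *	SKEY_ACC = 0xf0 (access-control bits)
 *	SKEY_F   = 0x08 (fetch-protection bit)
 *	SKEY_R   = 0x04 (reference bit)
 *	SKEY_C   = 0x02 (change bit)
 *
 * which is why kvm_s390_set_skeys() above rejects keys with 0x01 set.
 */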
2106 * Base address and length must be sent at the start of each block, therefore
2107 * it's cheaper to send some clean data, as long as it's less than the size of two longs.
2110 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
2111 /* for consistency */
2112 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
2114 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2115 u8 *res, unsigned long bufsize)
2117 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2120 while (args->count < bufsize) {
2121 hva = gfn_to_hva(kvm, cur_gfn);
2123 * We return an error if the first value was invalid, but we
2124 * return successfully if at least one value was copied.
2126 if (kvm_is_error_hva(hva))
2127 return args->count ? 0 : -EFAULT;
2128 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2130 res[args->count++] = (pgstev >> 24) & 0x43;
2137 static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2140 return ____gfn_to_memslot(slots, gfn, true);
2143 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2144 unsigned long cur_gfn)
2146 struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2147 unsigned long ofs = cur_gfn - ms->base_gfn;
2148 struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2150 if (ms->base_gfn + ms->npages <= cur_gfn) {
2151 mnode = rb_next(mnode);
2152 /* If we are above the highest slot, wrap around */
2154 mnode = rb_first(&slots->gfn_tree);
2156 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2160 if (cur_gfn < ms->base_gfn)
2163 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2164 while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2165 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2166 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2168 return ms->base_gfn + ofs;
2171 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2172 u8 *res, unsigned long bufsize)
2174 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2175 struct kvm_memslots *slots = kvm_memslots(kvm);
2176 struct kvm_memory_slot *ms;
2178 if (unlikely(kvm_memslots_empty(slots)))
2181 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2182 ms = gfn_to_memslot(kvm, cur_gfn);
2184 args->start_gfn = cur_gfn;
2187 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2188 mem_end = kvm_s390_get_gfn_end(slots);
2190 while (args->count < bufsize) {
2191 hva = gfn_to_hva(kvm, cur_gfn);
2192 if (kvm_is_error_hva(hva))
2194 /* Decrement only if we actually flipped the bit to 0 */
2195 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2196 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2197 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2199 /* Save the value */
2200 res[args->count++] = (pgstev >> 24) & 0x43;
2201 /* If the next bit is too far away, stop. */
2202 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2204 /* If we reached the previous "next", find the next one */
2205 if (cur_gfn == next_gfn)
2206 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2207 /* Reached the end of memory or of the buffer, stop */
2208 if ((next_gfn >= mem_end) ||
2209 (next_gfn - args->start_gfn >= bufsize))
2212 /* Reached the end of the current memslot, take the next one. */
2213 if (cur_gfn - ms->base_gfn >= ms->npages) {
2214 ms = gfn_to_memslot(kvm, cur_gfn);
2223 * This function searches for the next page with dirty CMMA attributes, and
2224 * saves the attributes in the buffer up to either the end of the buffer or
2225 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2226 * no trailing clean bytes are saved.
2227 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2228 * output buffer will indicate 0 as length.
2230 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2231 struct kvm_s390_cmma_log *args)
2233 unsigned long bufsize;
2234 int srcu_idx, peek, ret;
2237 if (!kvm->arch.use_cmma)
2239 /* Invalid/unsupported flags were specified */
2240 if (args->flags & ~KVM_S390_CMMA_PEEK)
2242 /* Migration mode query, and we are not doing a migration */
2243 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2244 if (!peek && !kvm->arch.migration_mode)
2246 /* CMMA is disabled or was not used, or the buffer has length zero */
2247 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2248 if (!bufsize || !kvm->mm->context.uses_cmm) {
2249 memset(args, 0, sizeof(*args));
2252 /* We are not peeking, and there are no dirty pages */
2253 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2254 memset(args, 0, sizeof(*args));
2258 values = vmalloc(bufsize);
2262 mmap_read_lock(kvm->mm);
2263 srcu_idx = srcu_read_lock(&kvm->srcu);
2265 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2267 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2268 srcu_read_unlock(&kvm->srcu, srcu_idx);
2269 mmap_read_unlock(kvm->mm);
2271 if (kvm->arch.migration_mode)
2272 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2274 args->remaining = 0;
2276 if (copy_to_user((void __user *)args->values, values, args->count))
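/*
 * Illustrative sketch (userspace, not kernel code) of a migration loop
 * driving KVM_S390_GET_CMMA_BITS; vm_fd, buf, BUF_LEN and send_block()
 * are assumptions of the example:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = BUF_LEN,
 *		.values = (__u64)(uintptr_t)buf,
 *	};
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log))
 *			break;
 *		send_block(log.start_gfn, buf, log.count);
 *		log.start_gfn += log.count;
 *		log.count = BUF_LEN;
 *	} while (log.remaining);
 */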
2284 * This function sets the CMMA attributes for the given pages. If the input
2285 * buffer has zero length, no action is taken; otherwise the attributes are
2286 * set and the mm->context.uses_cmm flag is set.
2288 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2289 const struct kvm_s390_cmma_log *args)
2291 unsigned long hva, mask, pgstev, i;
2293 int srcu_idx, r = 0;
2297 if (!kvm->arch.use_cmma)
2299 /* invalid/unsupported flags */
2300 if (args->flags != 0)
2302 /* Enforce sane limit on memory allocation */
2303 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2306 if (args->count == 0)
2309 bits = vmalloc(array_size(sizeof(*bits), args->count));
2313 r = copy_from_user(bits, (void __user *)args->values, args->count);
2319 mmap_read_lock(kvm->mm);
2320 srcu_idx = srcu_read_lock(&kvm->srcu);
2321 for (i = 0; i < args->count; i++) {
2322 hva = gfn_to_hva(kvm, args->start_gfn + i);
2323 if (kvm_is_error_hva(hva)) {
2329 pgstev = pgstev << 24;
2330 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2331 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2333 srcu_read_unlock(&kvm->srcu, srcu_idx);
2334 mmap_read_unlock(kvm->mm);
2336 if (!kvm->mm->context.uses_cmm) {
2337 mmap_write_lock(kvm->mm);
2338 kvm->mm->context.uses_cmm = 1;
2339 mmap_write_unlock(kvm->mm);
2347 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to non-protected.
2349 * @kvm: the VM whose protected vCPUs are to be converted
2350 * @rc: return value for the RC field of the UVC (in case of error)
2351 * @rrc: return value for the RRC field of the UVC (in case of error)
2353 * Does not stop in case of error, tries to convert as many
2354 * CPUs as possible. In case of error, the RC and RRC of the last error are returned in @rc and @rrc.
2357 * Return: 0 in case of success, otherwise -EIO
2359 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2361 struct kvm_vcpu *vcpu;
2367 * We ignore failures and try to destroy as many CPUs as possible.
2368 * At the same time we must not free the assigned resources when
2369 * this fails, as the ultravisor still has access to that memory.
2370 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak behind.
2372 * We want to return the first failure rc and rrc, though.
2374 kvm_for_each_vcpu(i, vcpu, kvm) {
2375 mutex_lock(&vcpu->mutex);
2376 if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2381 mutex_unlock(&vcpu->mutex);
2383 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2385 kvm_s390_gisa_enable(kvm);
2390 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM to protected.
2392 * @kvm: the VM whose protected vCPUs are to be converted
2393 * @rc: return value for the RC field of the UVC (in case of error)
2394 * @rrc: return value for the RRC field of the UVC (in case of error)
2396 * Tries to undo the conversion in case of error.
2398 * Return: 0 in case of success, otherwise -EIO
2400 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2406 struct kvm_vcpu *vcpu;
2408 /* Disable the GISA if the ultravisor does not support AIV. */
2409 if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
2410 kvm_s390_gisa_disable(kvm);
2412 kvm_for_each_vcpu(i, vcpu, kvm) {
2413 mutex_lock(&vcpu->mutex);
2414 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2415 mutex_unlock(&vcpu->mutex);
2420 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2425 * Here we provide user space with a direct interface to query UV
2426 * related data like UV maxima and available features as well as
2427 * feature specific data.
2429 * To facilitate future extension of the data structures we'll try to
2430 * write data up to the maximum requested length.
2432 static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2436 switch (info->header.id) {
2437 case KVM_PV_INFO_VM: {
2438 len_min = sizeof(info->header) + sizeof(info->vm);
2440 if (info->header.len_max < len_min)
2443 memcpy(info->vm.inst_calls_list,
2444 uv_info.inst_calls_list,
2445 sizeof(uv_info.inst_calls_list));
2447 /* It's max cpuid not max cpus, so it's off by one */
2448 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2449 info->vm.max_guests = uv_info.max_num_sec_conf;
2450 info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2451 info->vm.feature_indication = uv_info.uv_feature_indications;
2455 case KVM_PV_INFO_DUMP: {
2456 len_min = sizeof(info->header) + sizeof(info->dump);
2458 if (info->header.len_max < len_min)
2461 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2462 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2463 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
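/*
 * Illustrative sketch (userspace, not kernel code) of querying the UV
 * limits through the interface above; vm_fd is an assumption of the
 * example:
 *
 *	struct kvm_s390_pv_info info = {
 *		.header.id = KVM_PV_INFO_VM,
 *		.header.len_max = sizeof(info.header) + sizeof(info.vm),
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd = KVM_PV_INFO,
 *		.data = (__u64)(uintptr_t)&info,
 *	};
 *	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd) == 0)
 *		max_cpus = info.vm.max_cpus;
 */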
2471 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2472 struct kvm_s390_pv_dmp dmp)
2475 void __user *result_buff = (void __user *)dmp.buff_addr;
2477 switch (dmp.subcmd) {
2478 case KVM_PV_DUMP_INIT: {
2479 if (kvm->arch.pv.dumping)
2483 * Block SIE entry as concurrent dump UVCs could lead to validities.
2486 kvm_s390_vcpu_block_all(kvm);
2488 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2489 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2490 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2493 kvm->arch.pv.dumping = true;
2495 kvm_s390_vcpu_unblock_all(kvm);
2500 case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2501 if (!kvm->arch.pv.dumping)
2505 * gaddr is an output parameter since we might stop
2506 * early. As dmp will be copied back in our caller, we
2507 * don't need to do it ourselves.
2509 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2510 &cmd->rc, &cmd->rrc);
2513 case KVM_PV_DUMP_COMPLETE: {
2514 if (!kvm->arch.pv.dumping)
2518 if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2521 r = kvm_s390_pv_dump_complete(kvm, result_buff,
2522 &cmd->rc, &cmd->rrc);
2533 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2535 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2536 void __user *argp = (void __user *)cmd->data;
2541 mutex_lock(&kvm->lock);
2544 case KVM_PV_ENABLE: {
2546 if (kvm_s390_pv_is_protected(kvm))
2550 * FMT 4 SIE needs esca. As we never switch back to bsca from
2551 * esca, we need no cleanup in the error cases below
2553 r = sca_switch_to_extended(kvm);
2557 mmap_write_lock(current->mm);
2558 r = gmap_mark_unmergeable();
2559 mmap_write_unlock(current->mm);
2563 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2567 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2569 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2571 /* we need to block service interrupts from now on */
2572 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2575 case KVM_PV_ASYNC_CLEANUP_PREPARE:
2577 if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2580 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2582 * If a CPU could not be destroyed, destroying the VM will also fail.
2583 * There is no point in trying; instead, return the rc and rrc of the
2584 * first CPU that failed to be destroyed.
2588 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2590 /* no need to block service interrupts any more */
2591 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2593 case KVM_PV_ASYNC_CLEANUP_PERFORM:
2597 /* kvm->lock must not be held; this is asserted inside the function. */
2598 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2600 case KVM_PV_DISABLE: {
2602 if (!kvm_s390_pv_is_protected(kvm))
2605 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2607 * If a CPU could not be destroyed, destroying the VM will also fail.
2608 * There is no point in trying; instead, return the rc and rrc of the
2609 * first CPU that failed to be destroyed.
2613 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2615 /* no need to block service interrupts any more */
2616 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2619 case KVM_PV_SET_SEC_PARMS: {
2620 struct kvm_s390_pv_sec_parm parms = {};
2624 if (!kvm_s390_pv_is_protected(kvm))
2628 if (copy_from_user(&parms, argp, sizeof(parms)))
2631 /* Currently restricted to 8KB */
2633 if (parms.length > PAGE_SIZE * 2)
2637 hdr = vmalloc(parms.length);
2642 if (!copy_from_user(hdr, (void __user *)parms.origin,
2644 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2645 &cmd->rc, &cmd->rrc);
2650 case KVM_PV_UNPACK: {
2651 struct kvm_s390_pv_unp unp = {};
2654 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2658 if (copy_from_user(&unp, argp, sizeof(unp)))
2661 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2662 &cmd->rc, &cmd->rrc);
2665 case KVM_PV_VERIFY: {
2667 if (!kvm_s390_pv_is_protected(kvm))
2670 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2671 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2672 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2676 case KVM_PV_PREP_RESET: {
2678 if (!kvm_s390_pv_is_protected(kvm))
2681 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2682 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2683 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2687 case KVM_PV_UNSHARE_ALL: {
2689 if (!kvm_s390_pv_is_protected(kvm))
2692 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2693 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2694 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2699 struct kvm_s390_pv_info info = {};
2703 * No need to check the VM protection here.
2705 * Maybe user space wants to query some of the data
2706 * when the VM is still unprotected. If we see the
2707 * need to fence a new data command we can still
2708 * return an error in the info handler.
2712 if (copy_from_user(&info, argp, sizeof(info.header)))
2716 if (info.header.len_max < sizeof(info.header))
2719 data_len = kvm_s390_handle_pv_info(&info);
2725 * If a data command struct is extended (multiple
2726 * times) this can be used to determine how much of it was actually written.
2729 info.header.len_written = data_len;
2732 if (copy_to_user(argp, &info, data_len))
2739 struct kvm_s390_pv_dmp dmp;
2742 if (!kvm_s390_pv_is_protected(kvm))
2746 if (copy_from_user(&dmp, argp, sizeof(dmp)))
2749 r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2753 if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2764 mutex_unlock(&kvm->lock);
2769 static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2771 if (mop->flags & ~supported_flags || !mop->size)
2773 if (mop->size > MEM_OP_MAX_SIZE)
2775 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2784 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2786 void __user *uaddr = (void __user *)mop->buf;
2787 enum gacc_mode acc_mode;
2788 void *tmpbuf = NULL;
2791 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2792 KVM_S390_MEMOP_F_CHECK_ONLY);
2796 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2797 tmpbuf = vmalloc(mop->size);
2802 srcu_idx = srcu_read_lock(&kvm->srcu);
2804 if (kvm_is_error_gpa(kvm, mop->gaddr)) {
2809 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2810 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2811 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2814 if (acc_mode == GACC_FETCH) {
2815 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2816 mop->size, GACC_FETCH, mop->key);
2819 if (copy_to_user(uaddr, tmpbuf, mop->size))
2822 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2826 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2827 mop->size, GACC_STORE, mop->key);
2831 srcu_read_unlock(&kvm->srcu, srcu_idx);
2837 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2839 void __user *uaddr = (void __user *)mop->buf;
2840 void __user *old_addr = (void __user *)mop->old_addr;
2843 char raw[sizeof(__uint128_t)];
2844 } old = { .quad = 0 }, new = { .quad = 0 };
2845 unsigned int off_in_quad = sizeof(new) - mop->size;
2849 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2853 * This validates off_in_quad. Checking that size is a power
2854 * of two is not necessary, as cmpxchg_guest_abs_with_key
2855 * takes care of that
2857 if (mop->size > sizeof(new))
2859 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
2861 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
2864 srcu_idx = srcu_read_lock(&kvm->srcu);
2866 if (kvm_is_error_gpa(kvm, mop->gaddr)) {
2871 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
2872 new.quad, mop->key, &success);
2873 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
2877 srcu_read_unlock(&kvm->srcu, srcu_idx);
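/*
 * Worked example for the off_in_quad logic above: for a 4-byte cmpxchg,
 * off_in_quad = 16 - 4 = 12, so the user-supplied old and new values are
 * copied into raw[12..15]. On big-endian s390 these are the least
 * significant bytes of old.quad/new.quad, i.e. the operand ends up
 * right-aligned within the 16-byte quad that
 * cmpxchg_guest_abs_with_key() operates on.
 */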
2881 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2884 * This is technically a heuristic only: if the kvm->lock is not
2885 * taken, it is not guaranteed that the vm is/remains non-protected.
2886 * This is ok from a kernel perspective, wrongdoing is detected
2887 * on the access, -EFAULT is returned and the vm may crash the
2888 * next time it accesses the memory in question.
2889 * There is no sane use case for doing a protection switch and a memop
2890 * on two different CPUs at the same time.
2892 if (kvm_s390_pv_get_handle(kvm))
2896 case KVM_S390_MEMOP_ABSOLUTE_READ:
2897 case KVM_S390_MEMOP_ABSOLUTE_WRITE:
2898 return kvm_s390_vm_mem_op_abs(kvm, mop);
2899 case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
2900 return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
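/*
 * Illustrative sketch (userspace, not kernel code) of an absolute read
 * through the VM memop above; vm_fd and buf are assumptions of the
 * example:
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = 0x10000,
 *		.size = 256,
 *		.op = KVM_S390_MEMOP_ABSOLUTE_READ,
 *		.buf = (__u64)(uintptr_t)buf,
 *	};
 *	if (ioctl(vm_fd, KVM_S390_MEM_OP, &mop) < 0)
 *		perror("KVM_S390_MEM_OP");
 */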
2906 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2908 struct kvm *kvm = filp->private_data;
2909 void __user *argp = (void __user *)arg;
2910 struct kvm_device_attr attr;
2914 case KVM_S390_INTERRUPT: {
2915 struct kvm_s390_interrupt s390int;
2918 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2920 r = kvm_s390_inject_vm(kvm, &s390int);
2923 case KVM_CREATE_IRQCHIP: {
2924 struct kvm_irq_routing_entry routing;
2927 if (kvm->arch.use_irqchip) {
2928 /* Set up dummy routing. */
2929 memset(&routing, 0, sizeof(routing));
2930 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
2934 case KVM_SET_DEVICE_ATTR: {
2936 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2938 r = kvm_s390_vm_set_attr(kvm, &attr);
2941 case KVM_GET_DEVICE_ATTR: {
2943 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2945 r = kvm_s390_vm_get_attr(kvm, &attr);
2948 case KVM_HAS_DEVICE_ATTR: {
2950 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2952 r = kvm_s390_vm_has_attr(kvm, &attr);
2955 case KVM_S390_GET_SKEYS: {
2956 struct kvm_s390_skeys args;
2959 if (copy_from_user(&args, argp,
2960 sizeof(struct kvm_s390_skeys)))
2962 r = kvm_s390_get_skeys(kvm, &args);
2965 case KVM_S390_SET_SKEYS: {
2966 struct kvm_s390_skeys args;
2969 if (copy_from_user(&args, argp,
2970 sizeof(struct kvm_s390_skeys)))
2972 r = kvm_s390_set_skeys(kvm, &args);
2975 case KVM_S390_GET_CMMA_BITS: {
2976 struct kvm_s390_cmma_log args;
2979 if (copy_from_user(&args, argp, sizeof(args)))
2981 mutex_lock(&kvm->slots_lock);
2982 r = kvm_s390_get_cmma_bits(kvm, &args);
2983 mutex_unlock(&kvm->slots_lock);
2985 r = copy_to_user(argp, &args, sizeof(args));
2991 case KVM_S390_SET_CMMA_BITS: {
2992 struct kvm_s390_cmma_log args;
2995 if (copy_from_user(&args, argp, sizeof(args)))
2997 mutex_lock(&kvm->slots_lock);
2998 r = kvm_s390_set_cmma_bits(kvm, &args);
2999 mutex_unlock(&kvm->slots_lock);
3002 case KVM_S390_PV_COMMAND: {
3003 struct kvm_pv_cmd args;
3005 /* protvirt means user cpu state */
3006 kvm_s390_set_user_cpu_state_ctrl(kvm);
3008 if (!is_prot_virt_host()) {
3012 if (copy_from_user(&args, argp, sizeof(args))) {
3020 /* must be called without kvm->lock */
3021 r = kvm_s390_handle_pv(kvm, &args);
3022 if (copy_to_user(argp, &args, sizeof(args))) {
3028 case KVM_S390_MEM_OP: {
3029 struct kvm_s390_mem_op mem_op;
3031 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3032 r = kvm_s390_vm_mem_op(kvm, &mem_op);
3037 case KVM_S390_ZPCI_OP: {
3038 struct kvm_s390_zpci_op args;
3041 if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3043 if (copy_from_user(&args, argp, sizeof(args))) {
3047 r = kvm_s390_pci_zpci_op(kvm, &args);
3057 static int kvm_s390_apxa_installed(void)
3059 struct ap_config_info info;
3061 if (ap_instructions_available()) {
3062 if (ap_qci(&info) == 0)
3070 * The format of the crypto control block (CRYCB) is specified in the 3 low
3071 * order bits of the CRYCB designation (CRYCBD) field as follows:
3072 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3073 * AP extended addressing (APXA) facility is installed.
3074 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3075 * Format 2: Both the APXA and MSAX3 facilities are installed.
3077 static void kvm_s390_set_crycb_format(struct kvm *kvm)
3079 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
3081 /* Clear the CRYCB format bits - i.e., set format 0 by default */
3082 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3084 /* Check whether MSAX3 is installed */
3085 if (!test_kvm_facility(kvm, 76))
3088 if (kvm_s390_apxa_installed())
3089 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3091 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
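/*
 * For illustration (the address is made up): with both MSAX3 and APXA
 * installed and a CRYCB at 0x12345000, the designation ends up as
 *
 *	crycbd = 0x12345000 | CRYCB_FORMAT2;
 *
 * i.e. the CRYCB origin with the format encoded in the low bits.
 */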
3095 * kvm_arch_crypto_set_masks
3097 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3099 * @apm: the mask identifying the accessible AP adapters
3100 * @aqm: the mask identifying the accessible AP domains
3101 * @adm: the mask identifying the accessible AP control domains
3103 * Set the masks that identify the adapters, domains and control domains to
3104 * which the KVM guest is granted access.
3106 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3109 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3110 unsigned long *aqm, unsigned long *adm)
3112 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3114 kvm_s390_vcpu_block_all(kvm);
3116 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3117 case CRYCB_FORMAT2: /* APCB1 use 256 bits */
3118 memcpy(crycb->apcb1.apm, apm, 32);
3119 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3120 apm[0], apm[1], apm[2], apm[3]);
3121 memcpy(crycb->apcb1.aqm, aqm, 32);
3122 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3123 aqm[0], aqm[1], aqm[2], aqm[3]);
3124 memcpy(crycb->apcb1.adm, adm, 32);
3125 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3126 adm[0], adm[1], adm[2], adm[3]);
3129 case CRYCB_FORMAT0: /* Fall through - both formats use APCB0 */
3130 memcpy(crycb->apcb0.apm, apm, 8);
3131 memcpy(crycb->apcb0.aqm, aqm, 2);
3132 memcpy(crycb->apcb0.adm, adm, 2);
3133 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3134 apm[0], *((unsigned short *)aqm),
3135 *((unsigned short *)adm));
3137 default: /* Cannot happen */
3141 /* recreate the shadow crycb for each vcpu */
3142 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3143 kvm_s390_vcpu_unblock_all(kvm);
3145 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3148 * kvm_arch_crypto_clear_masks
3150 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3153 * Clear the masks that identify the adapters, domains and control domains to
3154 * which the KVM guest is granted access.
3156 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3159 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3161 kvm_s390_vcpu_block_all(kvm);
3163 memset(&kvm->arch.crypto.crycb->apcb0, 0,
3164 sizeof(kvm->arch.crypto.crycb->apcb0));
3165 memset(&kvm->arch.crypto.crycb->apcb1, 0,
3166 sizeof(kvm->arch.crypto.crycb->apcb1));
3168 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3169 /* recreate the shadow crycb for each vcpu */
3170 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3171 kvm_s390_vcpu_unblock_all(kvm);
3173 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3175 static u64 kvm_s390_get_initial_cpuid(void)
3180 cpuid.version = 0xff;
3181 return *((u64 *) &cpuid);
3184 static void kvm_s390_crypto_init(struct kvm *kvm)
3186 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3187 kvm_s390_set_crycb_format(kvm);
3188 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3190 if (!test_kvm_facility(kvm, 76))
3193 /* Enable AES/DEA protected key functions by default */
3194 kvm->arch.crypto.aes_kw = 1;
3195 kvm->arch.crypto.dea_kw = 1;
3196 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3197 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3198 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3199 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3202 static void sca_dispose(struct kvm *kvm)
3204 if (kvm->arch.use_esca)
3205 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3207 free_page((unsigned long)(kvm->arch.sca));
3208 kvm->arch.sca = NULL;
3211 void kvm_arch_free_vm(struct kvm *kvm)
3213 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3214 kvm_s390_pci_clear_list(kvm);
3216 __kvm_arch_free_vm(kvm);
3219 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3221 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
3223 char debug_name[16];
3224 static unsigned long sca_offset;
3227 #ifdef CONFIG_KVM_S390_UCONTROL
3228 if (type & ~KVM_VM_S390_UCONTROL)
3230 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3237 rc = s390_enable_sie();
3243 if (!sclp.has_64bscao)
3244 alloc_flags |= GFP_DMA;
3245 rwlock_init(&kvm->arch.sca_lock);
3246 /* start with basic SCA */
3247 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3250 mutex_lock(&kvm_lock);
3252 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3254 kvm->arch.sca = (struct bsca_block *)
3255 ((char *) kvm->arch.sca + sca_offset);
3256 mutex_unlock(&kvm_lock);
3258 sprintf(debug_name, "kvm-%u", current->pid);
3260 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3264 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3265 kvm->arch.sie_page2 =
3266 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3267 if (!kvm->arch.sie_page2)
3270 kvm->arch.sie_page2->kvm = kvm;
3271 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3273 for (i = 0; i < kvm_s390_fac_size(); i++) {
3274 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3275 (kvm_s390_fac_base[i] |
3276 kvm_s390_fac_ext[i]);
3277 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3278 kvm_s390_fac_base[i];
3280 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3282 /* we are always in czam mode - even on pre z14 machines */
3283 set_kvm_facility(kvm->arch.model.fac_mask, 138);
3284 set_kvm_facility(kvm->arch.model.fac_list, 138);
3285 /* we emulate STHYI in kvm */
3286 set_kvm_facility(kvm->arch.model.fac_mask, 74);
3287 set_kvm_facility(kvm->arch.model.fac_list, 74);
3288 if (MACHINE_HAS_TLB_GUEST) {
3289 set_kvm_facility(kvm->arch.model.fac_mask, 147);
3290 set_kvm_facility(kvm->arch.model.fac_list, 147);
3293 if (css_general_characteristics.aiv && test_facility(65))
3294 set_kvm_facility(kvm->arch.model.fac_mask, 65);
3296 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3297 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3299 kvm_s390_crypto_init(kvm);
3301 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3302 mutex_lock(&kvm->lock);
3303 kvm_s390_pci_init_list(kvm);
3304 kvm_s390_vcpu_pci_enable_interp(kvm);
3305 mutex_unlock(&kvm->lock);
3308 mutex_init(&kvm->arch.float_int.ais_lock);
3309 spin_lock_init(&kvm->arch.float_int.lock);
3310 for (i = 0; i < FIRQ_LIST_COUNT; i++)
3311 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3312 init_waitqueue_head(&kvm->arch.ipte_wq);
3313 mutex_init(&kvm->arch.ipte_mutex);
3315 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3316 VM_EVENT(kvm, 3, "vm created with type %lu", type);
3318 if (type & KVM_VM_S390_UCONTROL) {
3319 kvm->arch.gmap = NULL;
3320 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3322 if (sclp.hamax == U64_MAX)
3323 kvm->arch.mem_limit = TASK_SIZE_MAX;
3325 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3327 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3328 if (!kvm->arch.gmap)
3330 kvm->arch.gmap->private = kvm;
3331 kvm->arch.gmap->pfault_enabled = 0;
3334 kvm->arch.use_pfmfi = sclp.has_pfmfi;
3335 kvm->arch.use_skf = sclp.has_skey;
3336 spin_lock_init(&kvm->arch.start_stop_lock);
3337 kvm_s390_vsie_init(kvm);
3339 kvm_s390_gisa_init(kvm);
3340 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3341 kvm->arch.pv.set_aside = NULL;
3342 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
3346 free_page((unsigned long)kvm->arch.sie_page2);
3347 debug_unregister(kvm->arch.dbf);
3349 KVM_EVENT(3, "creation of vm failed: %d", rc);
3353 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3357 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3358 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3359 kvm_s390_clear_local_irqs(vcpu);
3360 kvm_clear_async_pf_completion_queue(vcpu);
3361 if (!kvm_is_ucontrol(vcpu->kvm))
3363 kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3365 if (kvm_is_ucontrol(vcpu->kvm))
3366 gmap_remove(vcpu->arch.gmap);
3368 if (vcpu->kvm->arch.use_cmma)
3369 kvm_s390_vcpu_unsetup_cmma(vcpu);
3370 /* We cannot hold the vcpu mutex here; we are already dying */
3371 if (kvm_s390_pv_cpu_get_handle(vcpu))
3372 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3373 free_page((unsigned long)(vcpu->arch.sie_block));
3376 void kvm_arch_destroy_vm(struct kvm *kvm)
3380 kvm_destroy_vcpus(kvm);
3382 kvm_s390_gisa_destroy(kvm);
3384 * We are already at the end of life and kvm->lock is not taken.
3385 * This is ok as the file descriptor is closed by now and nobody
3386 * can mess with the pv state.
3388 kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3390 * Remove the mmu notifier only when the whole KVM VM is torn down,
3391 * and only if one was registered to begin with. If the VM is
3392 * currently not protected, but was previously protected,
3393 * then it's possible that the notifier is still registered.
3395 if (kvm->arch.pv.mmu_notifier.ops)
3396 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3398 debug_unregister(kvm->arch.dbf);
3399 free_page((unsigned long)kvm->arch.sie_page2);
3400 if (!kvm_is_ucontrol(kvm))
3401 gmap_remove(kvm->arch.gmap);
3402 kvm_s390_destroy_adapters(kvm);
3403 kvm_s390_clear_float_irqs(kvm);
3404 kvm_s390_vsie_destroy(kvm);
3405 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3408 /* Section: vcpu related */
3409 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3411 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3412 if (!vcpu->arch.gmap)
3414 vcpu->arch.gmap->private = vcpu->kvm;
3419 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3421 if (!kvm_s390_use_sca_entries())
3423 read_lock(&vcpu->kvm->arch.sca_lock);
3424 if (vcpu->kvm->arch.use_esca) {
3425 struct esca_block *sca = vcpu->kvm->arch.sca;
3427 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3428 sca->cpu[vcpu->vcpu_id].sda = 0;
3430 struct bsca_block *sca = vcpu->kvm->arch.sca;
3432 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3433 sca->cpu[vcpu->vcpu_id].sda = 0;
3435 read_unlock(&vcpu->kvm->arch.sca_lock);
3438 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3440 if (!kvm_s390_use_sca_entries()) {
3441 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3443 /* we still need the basic sca for the ipte control */
3444 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3445 vcpu->arch.sie_block->scaol = sca_phys;
3448 read_lock(&vcpu->kvm->arch.sca_lock);
3449 if (vcpu->kvm->arch.use_esca) {
3450 struct esca_block *sca = vcpu->kvm->arch.sca;
3451 phys_addr_t sca_phys = virt_to_phys(sca);
3453 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3454 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3455 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3456 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3457 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3459 struct bsca_block *sca = vcpu->kvm->arch.sca;
3460 phys_addr_t sca_phys = virt_to_phys(sca);
3462 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3463 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3464 vcpu->arch.sie_block->scaol = sca_phys;
3465 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3467 read_unlock(&vcpu->kvm->arch.sca_lock);
3470 /* Basic SCA to Extended SCA data copy routines */
3471 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
3474 d->sigp_ctrl.c = s->sigp_ctrl.c;
3475 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
3478 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
3482 d->ipte_control = s->ipte_control;
3484 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
3485 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
3488 static int sca_switch_to_extended(struct kvm *kvm)
3490 struct bsca_block *old_sca = kvm->arch.sca;
3491 struct esca_block *new_sca;
3492 struct kvm_vcpu *vcpu;
3493 unsigned long vcpu_idx;
3495 phys_addr_t new_sca_phys;
3497 if (kvm->arch.use_esca)
3500 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3504 new_sca_phys = virt_to_phys(new_sca);
3505 scaoh = new_sca_phys >> 32;
3506 scaol = new_sca_phys & ESCA_SCAOL_MASK;
3508 kvm_s390_vcpu_block_all(kvm);
3509 write_lock(&kvm->arch.sca_lock);
3511 sca_copy_b_to_e(new_sca, old_sca);
3513 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
3514 vcpu->arch.sie_block->scaoh = scaoh;
3515 vcpu->arch.sie_block->scaol = scaol;
3516 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3518 kvm->arch.sca = new_sca;
3519 kvm->arch.use_esca = 1;
3521 write_unlock(&kvm->arch.sca_lock);
3522 kvm_s390_vcpu_unblock_all(kvm);
3524 free_page((unsigned long)old_sca);
3526 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
3527 old_sca, kvm->arch.sca);
3531 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3535 if (!kvm_s390_use_sca_entries()) {
3536 if (id < KVM_MAX_VCPUS)
3540 if (id < KVM_S390_BSCA_CPU_SLOTS)
3542 if (!sclp.has_esca || !sclp.has_64bscao)
3545 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3547 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
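/*
 * For reference (values as of this writing): a basic SCA (bsca_block)
 * provides KVM_S390_BSCA_CPU_SLOTS == 64 entries, while an extended SCA
 * (esca_block) provides KVM_S390_ESCA_CPU_SLOTS == 248, which is why
 * vcpu ids beyond the basic range trigger the switch above.
 */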
3550 /* must be called with preemption disabled to protect from TOD sync and vcpu_load/put */
3551 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3553 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3554 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3555 vcpu->arch.cputm_start = get_tod_clock_fast();
3556 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3559 /* must be called with preemption disabled to protect from TOD sync and vcpu_load/put */
3560 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3562 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3563 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3564 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3565 vcpu->arch.cputm_start = 0;
3566 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3569 /* must be called with preemption disabled to protect from TOD sync and vcpu_load/put */
3570 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3572 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3573 vcpu->arch.cputm_enabled = true;
3574 __start_cpu_timer_accounting(vcpu);
3577 /* must be called with preemption disabled to protect from TOD sync and vcpu_load/put */
3578 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3580 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3581 __stop_cpu_timer_accounting(vcpu);
3582 vcpu->arch.cputm_enabled = false;
3585 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3587 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3588 __enable_cpu_timer_accounting(vcpu);
3592 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3594 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3595 __disable_cpu_timer_accounting(vcpu);
3599 /* set the cpu timer - may only be called from the VCPU thread itself */
3600 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3602 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3603 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3604 if (vcpu->arch.cputm_enabled)
3605 vcpu->arch.cputm_start = get_tod_clock_fast();
3606 vcpu->arch.sie_block->cputm = cputm;
3607 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3611 /* update and get the cpu timer - can also be called from other VCPU threads */
3612 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3617 if (unlikely(!vcpu->arch.cputm_enabled))
3618 return vcpu->arch.sie_block->cputm;
3620 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3622 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3624 * If the writer would ever execute a read in the critical
3625 * section, e.g. in irq context, we have a deadlock.
3627 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3628 value = vcpu->arch.sie_block->cputm;
3629 /* if cputm_start is 0, accounting is being started/stopped */
3630 if (likely(vcpu->arch.cputm_start))
3631 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3632 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3637 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3640 gmap_enable(vcpu->arch.enabled_gmap);
3641 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3642 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3643 __start_cpu_timer_accounting(vcpu);
3647 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3650 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3651 __stop_cpu_timer_accounting(vcpu);
3652 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3653 vcpu->arch.enabled_gmap = gmap_get_enabled();
3654 gmap_disable(vcpu->arch.enabled_gmap);
3658 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3660 mutex_lock(&vcpu->kvm->lock);
3662 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3663 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3665 mutex_unlock(&vcpu->kvm->lock);
3666 if (!kvm_is_ucontrol(vcpu->kvm)) {
3667 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3670 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3671 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3672 /* make vcpu_load load the right gmap on the first trigger */
3673 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3676 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3678 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3679 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3684 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3686 /* At least one ECC subfunction must be present */
3687 return kvm_has_pckmo_subfunc(kvm, 32) ||
3688 kvm_has_pckmo_subfunc(kvm, 33) ||
3689 kvm_has_pckmo_subfunc(kvm, 34) ||
3690 kvm_has_pckmo_subfunc(kvm, 40) ||
3691 kvm_has_pckmo_subfunc(kvm, 41);
3695 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3698 * If the AP instructions are not being interpreted and the MSAX3
3699 * facility is not configured for the guest, there is nothing to set up.
3701 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3704 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3705 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3706 vcpu->arch.sie_block->eca &= ~ECA_APIE;
3707 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3709 if (vcpu->kvm->arch.crypto.apie)
3710 vcpu->arch.sie_block->eca |= ECA_APIE;
3712 /* Set up protected key support */
3713 if (vcpu->kvm->arch.crypto.aes_kw) {
3714 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3715 /* ECC is also wrapped with the AES key */
3716 if (kvm_has_pckmo_ecc(vcpu->kvm))
3717 vcpu->arch.sie_block->ecd |= ECD_ECC;
3720 if (vcpu->kvm->arch.crypto.dea_kw)
3721 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3724 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3726 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3727 vcpu->arch.sie_block->cbrlo = 0;
3730 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3732 void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3737 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3741 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3743 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3745 vcpu->arch.sie_block->ibc = model->ibc;
3746 if (test_kvm_facility(vcpu->kvm, 7))
3747 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3750 static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3755 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3759 if (test_kvm_facility(vcpu->kvm, 78))
3760 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3761 else if (test_kvm_facility(vcpu->kvm, 8))
3762 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3764 kvm_s390_vcpu_setup_model(vcpu);
3766 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3767 if (MACHINE_HAS_ESOP)
3768 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3769 if (test_kvm_facility(vcpu->kvm, 9))
3770 vcpu->arch.sie_block->ecb |= ECB_SRSI;
3771 if (test_kvm_facility(vcpu->kvm, 11))
3772 vcpu->arch.sie_block->ecb |= ECB_PTF;
3773 if (test_kvm_facility(vcpu->kvm, 73))
3774 vcpu->arch.sie_block->ecb |= ECB_TE;
3775 if (!kvm_is_ucontrol(vcpu->kvm))
3776 vcpu->arch.sie_block->ecb |= ECB_SPECI;
3778 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3779 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3780 if (test_kvm_facility(vcpu->kvm, 130))
3781 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3782 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3784 vcpu->arch.sie_block->eca |= ECA_CEI;
3786 vcpu->arch.sie_block->eca |= ECA_IB;
3788 vcpu->arch.sie_block->eca |= ECA_SII;
3789 if (sclp.has_sigpif)
3790 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3791 if (test_kvm_facility(vcpu->kvm, 129)) {
3792 vcpu->arch.sie_block->eca |= ECA_VX;
3793 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3795 if (test_kvm_facility(vcpu->kvm, 139))
3796 vcpu->arch.sie_block->ecd |= ECD_MEF;
3797 if (test_kvm_facility(vcpu->kvm, 156))
3798 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3799 if (vcpu->arch.sie_block->gd) {
3800 vcpu->arch.sie_block->eca |= ECA_AIV;
3801 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3802 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3804 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3805 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3808 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3810 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3812 if (vcpu->kvm->arch.use_cmma) {
3813 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3817 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3818 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3820 vcpu->arch.sie_block->hpid = HPID_KVM;
3822 kvm_s390_vcpu_crypto_setup(vcpu);
3824 kvm_s390_vcpu_pci_setup(vcpu);
3826 mutex_lock(&vcpu->kvm->lock);
3827 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3828 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3830 kvm_s390_vcpu_unsetup_cmma(vcpu);
3832 mutex_unlock(&vcpu->kvm->lock);
3837 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3839 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3844 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3846 struct sie_page *sie_page;
3849 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3850 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3854 vcpu->arch.sie_block = &sie_page->sie_block;
3855 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3857 /* the real guest size will always be smaller than msl */
3858 vcpu->arch.sie_block->mso = 0;
3859 vcpu->arch.sie_block->msl = sclp.hamax;
3861 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3862 spin_lock_init(&vcpu->arch.local_int.lock);
3863 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3864 seqcount_init(&vcpu->arch.cputm_seqcount);
3866 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3867 kvm_clear_async_pf_completion_queue(vcpu);
3868 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3875 kvm_s390_set_prefix(vcpu, 0);
3876 if (test_kvm_facility(vcpu->kvm, 64))
3877 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3878 if (test_kvm_facility(vcpu->kvm, 82))
3879 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3880 if (test_kvm_facility(vcpu->kvm, 133))
3881 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3882 if (test_kvm_facility(vcpu->kvm, 156))
3883 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3884 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3885 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3888 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3890 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3892 if (kvm_is_ucontrol(vcpu->kvm)) {
3893 rc = __kvm_ucontrol_vcpu_init(vcpu);
3895 goto out_free_sie_block;
3898 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3899 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3900 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3902 rc = kvm_s390_vcpu_setup(vcpu);
3904 goto out_ucontrol_uninit;
3906 kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3909 out_ucontrol_uninit:
3910 if (kvm_is_ucontrol(vcpu->kvm))
3911 gmap_remove(vcpu->arch.gmap);
3913 free_page((unsigned long)(vcpu->arch.sie_block));
3917 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3919 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
3920 return kvm_s390_vcpu_has_irq(vcpu, 0);
3923 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3925 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3928 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
3930 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3934 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
3936 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3939 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3941 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3945 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3947 return atomic_read(&vcpu->arch.sie_block->prog20) &
3948 (PROG_BLOCK_SIE | PROG_REQUEST);
3951 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3953 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3957 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
3958 * If the CPU is not running (e.g. waiting as idle) the function will
3959 * return immediately. */
3960 void exit_sie(struct kvm_vcpu *vcpu)
3962 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3963 kvm_s390_vsie_kick(vcpu);
3964 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3968 /* Kick a guest cpu out of SIE to process a request synchronously */
3969 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
3971 __kvm_make_request(req, vcpu);
3972 kvm_s390_vcpu_request(vcpu);
3975 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3978 struct kvm *kvm = gmap->private;
3979 struct kvm_vcpu *vcpu;
3980 unsigned long prefix;
3983 if (gmap_is_shadow(gmap))
3985 if (start >= 1UL << 31)
3986 /* We are only interested in prefix pages */
3988 kvm_for_each_vcpu(i, vcpu, kvm) {
3989 /* match against both prefix pages */
3990 prefix = kvm_s390_get_prefix(vcpu);
3991 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3992 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3994 kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
3999 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
4001 /* do not poll with more than halt_poll_max_steal percent of steal time */
4002 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
4003 READ_ONCE(halt_poll_max_steal)) {
4004 vcpu->stat.halt_no_poll_steal++;
4010 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
4012 /* kvm common code refers to this, but never calls it */
4017 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
4018 struct kvm_one_reg *reg)
4023 case KVM_REG_S390_TODPR:
4024 r = put_user(vcpu->arch.sie_block->todpr,
4025 (u32 __user *)reg->addr);
4027 case KVM_REG_S390_EPOCHDIFF:
4028 r = put_user(vcpu->arch.sie_block->epoch,
4029 (u64 __user *)reg->addr);
4031 case KVM_REG_S390_CPU_TIMER:
4032 r = put_user(kvm_s390_get_cpu_timer(vcpu),
4033 (u64 __user *)reg->addr);
4035 case KVM_REG_S390_CLOCK_COMP:
4036 r = put_user(vcpu->arch.sie_block->ckc,
4037 (u64 __user *)reg->addr);
4039 case KVM_REG_S390_PFTOKEN:
4040 r = put_user(vcpu->arch.pfault_token,
4041 (u64 __user *)reg->addr);
4043 case KVM_REG_S390_PFCOMPARE:
4044 r = put_user(vcpu->arch.pfault_compare,
4045 (u64 __user *)reg->addr);
4047 case KVM_REG_S390_PFSELECT:
4048 r = put_user(vcpu->arch.pfault_select,
4049 (u64 __user *)reg->addr);
4051 case KVM_REG_S390_PP:
4052 r = put_user(vcpu->arch.sie_block->pp,
4053 (u64 __user *)reg->addr);
4055 case KVM_REG_S390_GBEA:
4056 r = put_user(vcpu->arch.sie_block->gbea,
4057 (u64 __user *)reg->addr);
4066 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
4067 struct kvm_one_reg *reg)
4073 case KVM_REG_S390_TODPR:
4074 r = get_user(vcpu->arch.sie_block->todpr,
4075 (u32 __user *)reg->addr);
4077 case KVM_REG_S390_EPOCHDIFF:
4078 r = get_user(vcpu->arch.sie_block->epoch,
4079 (u64 __user *)reg->addr);
4081 case KVM_REG_S390_CPU_TIMER:
4082 r = get_user(val, (u64 __user *)reg->addr);
4084 kvm_s390_set_cpu_timer(vcpu, val);
4086 case KVM_REG_S390_CLOCK_COMP:
4087 r = get_user(vcpu->arch.sie_block->ckc,
4088 (u64 __user *)reg->addr);
4090 case KVM_REG_S390_PFTOKEN:
4091 r = get_user(vcpu->arch.pfault_token,
4092 (u64 __user *)reg->addr);
4093 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4094 kvm_clear_async_pf_completion_queue(vcpu);
4096 case KVM_REG_S390_PFCOMPARE:
4097 r = get_user(vcpu->arch.pfault_compare,
4098 (u64 __user *)reg->addr);
4100 case KVM_REG_S390_PFSELECT:
4101 r = get_user(vcpu->arch.pfault_select,
4102 (u64 __user *)reg->addr);
4104 case KVM_REG_S390_PP:
4105 r = get_user(vcpu->arch.sie_block->pp,
4106 (u64 __user *)reg->addr);
4108 case KVM_REG_S390_GBEA:
4109 r = get_user(vcpu->arch.sie_block->gbea,
4110 (u64 __user *)reg->addr);
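/*
 * Illustrative sketch (userspace, not kernel code) of reading the CPU
 * timer through the ONE_REG interface above; vcpu_fd is an assumption
 * of the example:
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(uintptr_t)&cputm,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		printf("cpu timer: %llu\n", cputm);
 */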
4119 static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4121 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4122 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4123 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
4125 kvm_clear_async_pf_completion_queue(vcpu);
4126 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
4127 kvm_s390_vcpu_stop(vcpu);
4128 kvm_s390_clear_local_irqs(vcpu);
4131 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
4133 /* Initial reset is a superset of the normal reset */
4134 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
4137 * This equals initial cpu reset in pop, but we don't switch to ESA.
4138 * We not only reset the internal data, but also ...
4140 vcpu->arch.sie_block->gpsw.mask = 0;
4141 vcpu->arch.sie_block->gpsw.addr = 0;
4142 kvm_s390_set_prefix(vcpu, 0);
4143 kvm_s390_set_cpu_timer(vcpu, 0);
4144 vcpu->arch.sie_block->ckc = 0;
4145 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4146 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4147 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4149 /* ... the data in sync regs */
4150 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4151 vcpu->run->s.regs.ckc = 0;
4152 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4153 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4154 vcpu->run->psw_addr = 0;
4155 vcpu->run->psw_mask = 0;
4156 vcpu->run->s.regs.todpr = 0;
4157 vcpu->run->s.regs.cputm = 0;
4158 vcpu->run->s.regs.ckc = 0;
4159 vcpu->run->s.regs.pp = 0;
4160 vcpu->run->s.regs.gbea = 1;
4161 vcpu->run->s.regs.fpc = 0;
4163 * Do not reset these registers in the protected case, as some of
4164 * them are overlaid and not accessible in this case
4167 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4168 vcpu->arch.sie_block->gbea = 1;
4169 vcpu->arch.sie_block->pp = 0;
4170 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4171 vcpu->arch.sie_block->todpr = 0;
4175 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4177 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4179 /* Clear reset is a superset of the initial reset */
4180 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4182 memset(&regs->gprs, 0, sizeof(regs->gprs));
4183 memset(&regs->vrs, 0, sizeof(regs->vrs));
4184 memset(&regs->acrs, 0, sizeof(regs->acrs));
4185 memset(&regs->gscb, 0, sizeof(regs->gscb));
4188 regs->etoken_extension = 0;
4191 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4194 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4199 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4202 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4207 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4208 struct kvm_sregs *sregs)
4212 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4213 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4219 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4220 struct kvm_sregs *sregs)
4224 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4225 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4231 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4237 if (test_fp_ctl(fpu->fpc)) {
4241 vcpu->run->s.regs.fpc = fpu->fpc;
4243 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4244 (freg_t *) fpu->fprs);
4246 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4253 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4257 /* make sure we have the latest values */
4260 convert_vx_to_fp((freg_t *) fpu->fprs,
4261 (__vector128 *) vcpu->run->s.regs.vrs);
4263 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4264 fpu->fpc = vcpu->run->s.regs.fpc;
4270 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4274 if (!is_vcpu_stopped(vcpu))
4277 vcpu->run->psw_mask = psw.mask;
4278 vcpu->run->psw_addr = psw.addr;
4283 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4284 struct kvm_translation *tr)
4286 return -EINVAL; /* not implemented yet */
4289 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4290 KVM_GUESTDBG_USE_HW_BP | \
4291 KVM_GUESTDBG_ENABLE)
4293 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4294 struct kvm_guest_debug *dbg)
4300 vcpu->guest_debug = 0;
4301 kvm_s390_clear_bp_data(vcpu);
4303 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4307 if (!sclp.has_gpere) {
4312 if (dbg->control & KVM_GUESTDBG_ENABLE) {
4313 vcpu->guest_debug = dbg->control;
4314 /* enforce guest PER */
4315 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4317 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4318 rc = kvm_s390_import_bp_data(vcpu, dbg);
4320 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4321 vcpu->arch.guestdbg.last_bp = 0;
4325 vcpu->guest_debug = 0;
4326 kvm_s390_clear_bp_data(vcpu);
4327 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4335 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4336 struct kvm_mp_state *mp_state)
4342 /* CHECK_STOP and LOAD are not supported yet */
4343 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4344 KVM_MP_STATE_OPERATING;
4350 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4351 struct kvm_mp_state *mp_state)
4357 /* user space knows about this interface - let it control the state */
4358 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4360 switch (mp_state->mp_state) {
4361 case KVM_MP_STATE_STOPPED:
4362 rc = kvm_s390_vcpu_stop(vcpu);
4364 case KVM_MP_STATE_OPERATING:
4365 rc = kvm_s390_vcpu_start(vcpu);
4367 case KVM_MP_STATE_LOAD:
4368 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4372 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4374 case KVM_MP_STATE_CHECK_STOP:
4375 fallthrough; /* CHECK_STOP and LOAD are not supported yet */
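
/*
 * Illustrative userspace sketch (hypothetical helper, compiled out):
 * stopping a vcpu via KVM_SET_MP_STATE. Note that the first use of
 * this interface switches the VM to user-controlled cpu state.
 */
#if 0
static int stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp_state = {
		.mp_state = KVM_MP_STATE_STOPPED,
	};

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
}
#endif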
4384 static bool ibs_enabled(struct kvm_vcpu *vcpu)
4386 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4389 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4392 kvm_s390_vcpu_request_handled(vcpu);
4393 if (!kvm_request_pending(vcpu))
4396 * If the guest prefix changed, re-arm the ipte notifier for the
4397 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
4398 * This ensures that the ipte instruction for this request has
4399 * already finished. We might race against a second unmapper that
4400 * wants to set the blocking bit. Let's just retry the request loop.
4402 if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4404 rc = gmap_mprotect_notify(vcpu->arch.gmap,
4405 kvm_s390_get_prefix(vcpu),
4406 PAGE_SIZE * 2, PROT_WRITE);
4408 kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4414 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4415 vcpu->arch.sie_block->ihcpu = 0xffff;
4419 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4420 if (!ibs_enabled(vcpu)) {
4421 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4422 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4427 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4428 if (ibs_enabled(vcpu)) {
4429 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4430 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4435 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4436 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4440 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4442 * Disable CMM virtualization; we will emulate the ESSA
4443 * instruction manually, in order to provide the additional
4444 * functionality needed for live migration.
4446 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4450 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4452 * Re-enable CMM virtualization if CMMA is available and
4453 * CMM has been used.
4455 if ((vcpu->kvm->arch.use_cmma) &&
4456 (vcpu->kvm->mm->context.uses_cmm))
4457 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4461 /* we left the vsie handler, nothing to do, just clear the request */
4462 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
4467 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4469 struct kvm_vcpu *vcpu;
4470 union tod_clock clk;
4475 store_tod_clock_ext(&clk);
4477 kvm->arch.epoch = gtod->tod - clk.tod;
4479 if (test_kvm_facility(kvm, 139)) {
4480 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4481 if (kvm->arch.epoch > gtod->tod)
4482 kvm->arch.epdx -= 1;
4485 kvm_s390_vcpu_block_all(kvm);
4486 kvm_for_each_vcpu(i, vcpu, kvm) {
4487 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4488 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
4491 kvm_s390_vcpu_unblock_all(kvm);
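
/*
 * Worked example of the epoch arithmetic above (the numbers are made
 * up): with host TOD clk.tod = 0xf000000000000000 and requested guest
 * TOD gtod->tod = 0x1000000000000000, the 64-bit subtraction wraps and
 * yields epoch = 0x2000000000000000 with a borrow. Because
 * epoch > gtod->tod signals that wrap, the epoch index is decremented
 * by one, so the 128-bit pair <epdx, epoch> still represents
 * gtod - clk.tod as a multi-precision difference.
 */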
4495 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4497 if (!mutex_trylock(&kvm->lock))
4499 __kvm_s390_set_tod_clock(kvm, gtod);
4500 mutex_unlock(&kvm->lock);
4505 * kvm_arch_fault_in_page - fault-in guest page if necessary
4506 * @vcpu: The corresponding virtual cpu
4507 * @gpa: Guest physical address
4508 * @writable: Whether the page should be writable or not
4510 * Make sure that a guest page has been faulted-in on the host.
4512 * Return: Zero on success, negative error code otherwise.
4514 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
4516 return gmap_fault(vcpu->arch.gmap, gpa,
4517 writable ? FAULT_FLAG_WRITE : 0);
4520 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4521 unsigned long token)
4523 struct kvm_s390_interrupt inti;
4524 struct kvm_s390_irq irq;
4527 irq.u.ext.ext_params2 = token;
4528 irq.type = KVM_S390_INT_PFAULT_INIT;
4529 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4531 inti.type = KVM_S390_INT_PFAULT_DONE;
4532 inti.parm64 = token;
4533 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4537 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4538 struct kvm_async_pf *work)
4540 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4541 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4546 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4547 struct kvm_async_pf *work)
4549 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4550 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4553 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4554 struct kvm_async_pf *work)
4556 /* s390 will always inject the page directly */
4559 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4562 * s390 will always inject the page directly,
4563 * but we still want check_async_completion to clean up
4568 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4571 struct kvm_arch_async_pf arch;
4573 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4575 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4576 vcpu->arch.pfault_compare)
4578 if (psw_extint_disabled(vcpu))
4580 if (kvm_s390_vcpu_has_irq(vcpu, 0))
4582 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4584 if (!vcpu->arch.gmap->pfault_enabled)
4587 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
4588 hva += current->thread.gmap_addr & ~PAGE_MASK;
4589 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4592 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
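
/*
 * Illustrative reading of the PSW gating above (values are examples
 * only): with pfault_select = PSW_MASK_PSTATE and pfault_compare = 0,
 * (gpsw.mask & pfault_select) == pfault_compare holds only while the
 * guest runs in supervisor state, so async page faults are armed only
 * there; in problem state the check fails and the fault is resolved
 * synchronously instead.
 */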
4595 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4600 * On s390, notifications for arriving pages will be delivered
4601 * directly to the guest, but the housekeeping for completed pfaults
4602 * is handled outside the worker.
4604 kvm_check_async_pf_completion(vcpu);
4606 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4607 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4612 if (!kvm_is_ucontrol(vcpu->kvm)) {
4613 rc = kvm_s390_deliver_pending_interrupts(vcpu);
4618 rc = kvm_s390_handle_requests(vcpu);
4622 if (guestdbg_enabled(vcpu)) {
4623 kvm_s390_backup_guest_per_regs(vcpu);
4624 kvm_s390_patch_guest_per_regs(vcpu);
4627 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4629 vcpu->arch.sie_block->icptcode = 0;
4630 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4631 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4632 trace_kvm_s390_sie_enter(vcpu, cpuflags);
4637 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4639 struct kvm_s390_pgm_info pgm_info = {
4640 .code = PGM_ADDRESSING,
4645 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4646 trace_kvm_s390_sie_fault(vcpu);
4649 * We want to inject an addressing exception, which is defined as a
4650 * suppressing or terminating exception. However, since we came here
4651 * by a DAT access exception, the PSW still points to the faulting
4652 * instruction since DAT exceptions are nullifying. So we've got
4653 * to look up the current opcode to get the length of the instruction
4654 * to be able to forward the PSW.
4656 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4657 ilen = insn_length(opcode);
4661 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4662 * Forward by arbitrary ilc, injection will take care of
4663 * nullification if necessary.
4665 pgm_info = vcpu->arch.pgm;
4668 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4669 kvm_s390_forward_psw(vcpu, ilen);
4670 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4673 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4675 struct mcck_volatile_info *mcck_info;
4676 struct sie_page *sie_page;
4678 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4679 vcpu->arch.sie_block->icptcode);
4680 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4682 if (guestdbg_enabled(vcpu))
4683 kvm_s390_restore_guest_per_regs(vcpu);
4685 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4686 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4688 if (exit_reason == -EINTR) {
4689 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4690 sie_page = container_of(vcpu->arch.sie_block,
4691 struct sie_page, sie_block);
4692 mcck_info = &sie_page->mcck_info;
4693 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4697 if (vcpu->arch.sie_block->icptcode > 0) {
4698 int rc = kvm_handle_sie_intercept(vcpu);
4700 if (rc != -EOPNOTSUPP)
4702 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4703 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4704 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4705 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4707 } else if (exit_reason != -EFAULT) {
4708 vcpu->stat.exit_null++;
4710 } else if (kvm_is_ucontrol(vcpu->kvm)) {
4711 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4712 vcpu->run->s390_ucontrol.trans_exc_code =
4713 current->thread.gmap_addr;
4714 vcpu->run->s390_ucontrol.pgm_code = 0x10;
4716 } else if (current->thread.gmap_pfault) {
4717 trace_kvm_s390_major_guest_pfault(vcpu);
4718 current->thread.gmap_pfault = 0;
4719 if (kvm_arch_setup_async_pf(vcpu))
4721 vcpu->stat.pfault_sync++;
4722 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
4724 return vcpu_post_run_fault_in_sie(vcpu);
4727 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4728 static int __vcpu_run(struct kvm_vcpu *vcpu)
4730 int rc, exit_reason;
4731 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4734 * We try to hold kvm->srcu during most of vcpu_run (except when
4735 * running the guest), so that memslots (and other stuff) are protected.
4737 kvm_vcpu_srcu_read_lock(vcpu);
4740 rc = vcpu_pre_run(vcpu);
4744 kvm_vcpu_srcu_read_unlock(vcpu);
4746 * As PF_VCPU will be used in the fault handler, there must be no
4747 * uaccess between guest_enter and guest_exit.
4749 local_irq_disable();
4750 guest_enter_irqoff();
4751 __disable_cpu_timer_accounting(vcpu);
4753 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4754 memcpy(sie_page->pv_grregs,
4755 vcpu->run->s.regs.gprs,
4756 sizeof(sie_page->pv_grregs));
4758 if (test_cpu_flag(CIF_FPU))
4760 exit_reason = sie64a(vcpu->arch.sie_block,
4761 vcpu->run->s.regs.gprs);
4762 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4763 memcpy(vcpu->run->s.regs.gprs,
4764 sie_page->pv_grregs,
4765 sizeof(sie_page->pv_grregs));
4767 * We're not allowed to inject interrupts on intercepts
4768 * that leave the guest state in an "in-between" state
4769 * where the next SIE entry will do a continuation.
4770 * Fence interrupts in our "internal" PSW.
4772 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4773 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4774 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4777 local_irq_disable();
4778 __enable_cpu_timer_accounting(vcpu);
4779 guest_exit_irqoff();
4781 kvm_vcpu_srcu_read_lock(vcpu);
4783 rc = vcpu_post_run(vcpu, exit_reason);
4784 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
4786 kvm_vcpu_srcu_read_unlock(vcpu);
4790 static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4792 struct kvm_run *kvm_run = vcpu->run;
4793 struct runtime_instr_cb *riccb;
4796 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4797 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4798 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4799 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4800 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4801 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4802 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4803 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4805 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4806 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4807 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4808 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4809 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4810 kvm_clear_async_pf_completion_queue(vcpu);
4812 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4813 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4814 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4815 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4818 * If userspace sets the riccb (e.g. after migration) to a valid state,
4819 * we should enable RI here instead of doing the lazy enablement.
4821 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4822 test_kvm_facility(vcpu->kvm, 64) &&
4824 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4825 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
4826 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4829 * If userspace sets the gscb (e.g. after migration) to non-zero,
4830 * we should enable GS here instead of doing the lazy enablement.
4832 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4833 test_kvm_facility(vcpu->kvm, 133) &&
4835 !vcpu->arch.gs_enabled) {
4836 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4837 vcpu->arch.sie_block->ecb |= ECB_GS;
4838 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4839 vcpu->arch.gs_enabled = 1;
4841 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4842 test_kvm_facility(vcpu->kvm, 82)) {
4843 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4844 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4846 if (MACHINE_HAS_GS) {
4848 __ctl_set_bit(2, 4);
4849 if (current->thread.gs_cb) {
4850 vcpu->arch.host_gscb = current->thread.gs_cb;
4851 save_gs_cb(vcpu->arch.host_gscb);
4853 if (vcpu->arch.gs_enabled) {
4854 current->thread.gs_cb = (struct gs_cb *)
4855 &vcpu->run->s.regs.gscb;
4856 restore_gs_cb(current->thread.gs_cb);
4860 /* SIE will load etoken directly from SDNX and therefore kvm_run */
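
/*
 * Illustrative userspace sketch (hypothetical helper, compiled out):
 * requesting a BPBC change through the sync-regs interface, which is
 * consumed by sync_regs_fmt2() on the next KVM_RUN.
 */
#if 0
static void request_bpbc(struct kvm_run *run)
{
	run->s.regs.bpbc = 1;
	/* ignored by the kernel unless facility 82 is available */
	run->kvm_dirty_regs |= KVM_SYNC_BPBC;
}
#endif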
4863 static void sync_regs(struct kvm_vcpu *vcpu)
4865 struct kvm_run *kvm_run = vcpu->run;
4867 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4868 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4869 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4870 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4871 /* some control register changes require a tlb flush */
4872 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4874 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4875 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4876 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4878 save_access_regs(vcpu->arch.host_acrs);
4879 restore_access_regs(vcpu->run->s.regs.acrs);
4880 /* save host (userspace) fprs/vrs */
4882 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4883 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4885 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4887 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4888 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4889 if (test_fp_ctl(current->thread.fpu.fpc))
4890 /* User space provided an invalid FPC, let's clear it */
4891 current->thread.fpu.fpc = 0;
4893 /* Sync fmt2 only data */
4894 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
4895 sync_regs_fmt2(vcpu);
4898 * In several places we have to modify our internal view to
4899 * not do things that are disallowed by the ultravisor. For
4900 * example we must not inject interrupts after specific exits
4901 * (e.g. 112 prefix page not secure). We do this by turning
4902 * off the machine check, external and I/O interrupt bits
4903 * of our PSW copy. To avoid getting validity intercepts, we
4904 * accept only the condition code from userspace.
4906 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4907 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4911 kvm_run->kvm_dirty_regs = 0;
4914 static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4916 struct kvm_run *kvm_run = vcpu->run;
4918 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4919 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4920 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4921 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4922 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
4923 if (MACHINE_HAS_GS) {
4925 __ctl_set_bit(2, 4);
4926 if (vcpu->arch.gs_enabled)
4927 save_gs_cb(current->thread.gs_cb);
4928 current->thread.gs_cb = vcpu->arch.host_gscb;
4929 restore_gs_cb(vcpu->arch.host_gscb);
4930 if (!vcpu->arch.host_gscb)
4931 __ctl_clear_bit(2, 4);
4932 vcpu->arch.host_gscb = NULL;
4935 /* SIE will save etoken directly into SDNX and therefore kvm_run */
4938 static void store_regs(struct kvm_vcpu *vcpu)
4940 struct kvm_run *kvm_run = vcpu->run;
4942 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4943 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4944 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4945 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4946 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4947 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4948 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4949 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4950 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4951 save_access_regs(vcpu->run->s.regs.acrs);
4952 restore_access_regs(vcpu->arch.host_acrs);
4953 /* Save guest register state */
4955 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4956 /* Restore will be done lazily at return */
4957 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4958 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4959 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
4960 store_regs_fmt2(vcpu);
4963 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
4965 struct kvm_run *kvm_run = vcpu->run;
4969 * Running a VM while dumping always has the potential to
4970 * produce inconsistent dump data. But for PV vcpus a SIE
4971 * entry while dumping could also lead to a fatal validity
4972 * intercept which we absolutely want to avoid.
4974 if (vcpu->kvm->arch.pv.dumping)
4977 if (kvm_run->immediate_exit)
4980 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4981 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4986 if (guestdbg_exit_pending(vcpu)) {
4987 kvm_s390_prepare_debug_exit(vcpu);
4992 kvm_sigset_activate(vcpu);
4995 * no need to check the return value of vcpu_start as it can only have
4996 * an error for protvirt, but protvirt means user cpu state
4998 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4999 kvm_s390_vcpu_start(vcpu);
5000 } else if (is_vcpu_stopped(vcpu)) {
5001 pr_err_ratelimited("can't run stopped vcpu %d\n",
5008 enable_cpu_timer_accounting(vcpu);
5011 rc = __vcpu_run(vcpu);
5013 if (signal_pending(current) && !rc) {
5014 kvm_run->exit_reason = KVM_EXIT_INTR;
5018 if (guestdbg_exit_pending(vcpu) && !rc) {
5019 kvm_s390_prepare_debug_exit(vcpu);
5023 if (rc == -EREMOTE) {
5024 /* userspace support is needed, kvm_run has been prepared */
5028 disable_cpu_timer_accounting(vcpu);
5031 kvm_sigset_deactivate(vcpu);
5033 vcpu->stat.exit_userspace++;
5040 * store status at address
5041 * we have two special cases:
5042 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5043 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5045 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5047 unsigned char archmode = 1;
5048 freg_t fprs[NUM_FPRS];
5053 px = kvm_s390_get_prefix(vcpu);
5054 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5055 if (write_guest_abs(vcpu, 163, &archmode, 1))
5058 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5059 if (write_guest_real(vcpu, 163, &archmode, 1))
5063 gpa -= __LC_FPREGS_SAVE_AREA;
5065 /* manually convert vector registers if necessary */
5066 if (MACHINE_HAS_VX) {
5067 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5068 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5071 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5072 vcpu->run->s.regs.fprs, 128);
5074 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5075 vcpu->run->s.regs.gprs, 128);
5076 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5077 &vcpu->arch.sie_block->gpsw, 16);
5078 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5080 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5081 &vcpu->run->s.regs.fpc, 4);
5082 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5083 &vcpu->arch.sie_block->todpr, 4);
5084 cputm = kvm_s390_get_cpu_timer(vcpu);
5085 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5087 clkcomp = vcpu->arch.sie_block->ckc >> 8;
5088 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5090 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5091 &vcpu->run->s.regs.acrs, 64);
5092 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5093 &vcpu->arch.sie_block->gcr, 128);
5094 return rc ? -EFAULT : 0;
5097 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5100 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
5101 * switch in the run ioctl. Let's update our copies before we save
5102 * them into the save area.
5105 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
5106 save_access_regs(vcpu->run->s.regs.acrs);
5108 return kvm_s390_store_status_unloaded(vcpu, addr);
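
/*
 * Illustrative userspace sketch (hypothetical helper, compiled out):
 * KVM_S390_STORE_STATUS takes the target address directly as the
 * ioctl argument; the two special values are handled above.
 */
#if 0
static int store_status_at_prefix(int vcpu_fd)
{
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
		     KVM_S390_STORE_STATUS_PREFIXED);
}
#endif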
5111 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5113 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5114 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5117 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5120 struct kvm_vcpu *vcpu;
5122 kvm_for_each_vcpu(i, vcpu, kvm) {
5123 __disable_ibs_on_vcpu(vcpu);
5127 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5131 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5132 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5135 int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5137 int i, online_vcpus, r = 0, started_vcpus = 0;
5139 if (!is_vcpu_stopped(vcpu))
5142 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5143 /* Only one cpu at a time may enter/leave the STOPPED state. */
5144 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5145 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5147 /* Let's tell the UV that we want to change into the operating state */
5148 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5149 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5151 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5156 for (i = 0; i < online_vcpus; i++) {
5157 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5161 if (started_vcpus == 0) {
5162 /* we're the only active VCPU -> speed it up */
5163 __enable_ibs_on_vcpu(vcpu);
5164 } else if (started_vcpus == 1) {
5166 * As we are starting a second VCPU, we have to disable
5167 * the IBS facility on all VCPUs to remove potentially
5168 * outstanding ENABLE requests.
5170 __disable_ibs_on_all_vcpus(vcpu->kvm);
5173 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5175 * The real PSW might have changed due to a RESTART interpreted by the
5176 * ultravisor. We block all interrupts and let the next sie exit refresh our view.
5179 if (kvm_s390_pv_cpu_is_protected(vcpu))
5180 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5182 * Another VCPU might have used IBS while we were offline.
5183 * Let's play safe and flush the VCPU at startup.
5185 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5186 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5190 int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5192 int i, online_vcpus, r = 0, started_vcpus = 0;
5193 struct kvm_vcpu *started_vcpu = NULL;
5195 if (is_vcpu_stopped(vcpu))
5198 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5199 /* Only one cpu at a time may enter/leave the STOPPED state. */
5200 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5201 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5203 /* Let's tell the UV that we want to change into the stopped state */
5204 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5205 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5207 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5213 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5214 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5215 * have been fully processed. This will ensure that the VCPU
5216 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5218 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5219 kvm_s390_clear_stop_irq(vcpu);
5221 __disable_ibs_on_vcpu(vcpu);
5223 for (i = 0; i < online_vcpus; i++) {
5224 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5226 if (!is_vcpu_stopped(tmp)) {
5232 if (started_vcpus == 1) {
5234 * As we only have one VCPU left, we want to enable the
5235 * IBS facility for that VCPU to speed it up.
5237 __enable_ibs_on_vcpu(started_vcpu);
5240 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5244 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5245 struct kvm_enable_cap *cap)
5253 case KVM_CAP_S390_CSS_SUPPORT:
5254 if (!vcpu->kvm->arch.css_support) {
5255 vcpu->kvm->arch.css_support = 1;
5256 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5257 trace_kvm_s390_enable_css(vcpu->kvm);
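
/*
 * Illustrative userspace sketch (hypothetical helper, compiled out):
 * enabling CSS support. The capability is requested on a vcpu fd but
 * takes effect VM-wide, as the code above shows.
 */
#if 0
static int enable_css_support(int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_CSS_SUPPORT,
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
#endif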
5268 static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
5269 struct kvm_s390_mem_op *mop)
5271 void __user *uaddr = (void __user *)mop->buf;
5275 if (mop->flags || !mop->size)
5277 if (mop->size + mop->sida_offset < mop->size)
5279 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5281 if (!kvm_s390_pv_cpu_is_protected(vcpu))
5284 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5287 case KVM_S390_MEMOP_SIDA_READ:
5288 if (copy_to_user(uaddr, sida_addr, mop->size))
5292 case KVM_S390_MEMOP_SIDA_WRITE:
5293 if (copy_from_user(sida_addr, uaddr, mop->size))
5300 static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
5301 struct kvm_s390_mem_op *mop)
5303 void __user *uaddr = (void __user *)mop->buf;
5304 enum gacc_mode acc_mode;
5305 void *tmpbuf = NULL;
5308 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
5309 KVM_S390_MEMOP_F_CHECK_ONLY |
5310 KVM_S390_MEMOP_F_SKEY_PROTECTION);
5313 if (mop->ar >= NUM_ACRS)
5315 if (kvm_s390_pv_cpu_is_protected(vcpu))
5317 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
5318 tmpbuf = vmalloc(mop->size);
5323 acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
5324 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5325 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5326 acc_mode, mop->key);
5329 if (acc_mode == GACC_FETCH) {
5330 r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5331 mop->size, mop->key);
5334 if (copy_to_user(uaddr, tmpbuf, mop->size)) {
5339 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
5343 r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5344 mop->size, mop->key);
5348 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
5349 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5356 static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5357 struct kvm_s390_mem_op *mop)
5361 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5364 case KVM_S390_MEMOP_LOGICAL_READ:
5365 case KVM_S390_MEMOP_LOGICAL_WRITE:
5366 r = kvm_s390_vcpu_mem_op(vcpu, mop);
5368 case KVM_S390_MEMOP_SIDA_READ:
5369 case KVM_S390_MEMOP_SIDA_WRITE:
5370 /* we are locked against sida going away by the vcpu->mutex */
5371 r = kvm_s390_vcpu_sida_op(vcpu, mop);
5377 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
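
/*
 * Illustrative userspace sketch (hypothetical helper, compiled out):
 * a logical read through KVM_S390_MEM_OP. The buffer address is
 * passed as a __u64; the size is bounded by MEM_OP_MAX_SIZE.
 */
#if 0
static int read_guest_logical(int vcpu_fd, __u64 gaddr, void *buf, __u32 size)
{
	struct kvm_s390_mem_op mop = {
		.gaddr = gaddr,
		.buf = (__u64)(unsigned long)buf,
		.size = size,
		.op = KVM_S390_MEMOP_LOGICAL_READ,
		.ar = 0,	/* must be < NUM_ACRS */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
}
#endif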
5381 long kvm_arch_vcpu_async_ioctl(struct file *filp,
5382 unsigned int ioctl, unsigned long arg)
5384 struct kvm_vcpu *vcpu = filp->private_data;
5385 void __user *argp = (void __user *)arg;
5388 case KVM_S390_IRQ: {
5389 struct kvm_s390_irq s390irq;
5391 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
5393 return kvm_s390_inject_vcpu(vcpu, &s390irq);
5395 case KVM_S390_INTERRUPT: {
5396 struct kvm_s390_interrupt s390int;
5397 struct kvm_s390_irq s390irq = {};
5399 if (copy_from_user(&s390int, argp, sizeof(s390int)))
5401 if (s390int_to_s390irq(&s390int, &s390irq))
5403 return kvm_s390_inject_vcpu(vcpu, &s390irq);
5406 return -ENOIOCTLCMD;
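
/*
 * Illustrative userspace sketch (hypothetical helper, compiled out):
 * injecting an emergency signal through the async KVM_S390_IRQ ioctl
 * handled above.
 */
#if 0
static int inject_emergency(int vcpu_fd, __u16 src_cpu_addr)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = src_cpu_addr,
	};

	return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
}
#endif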
5409 static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
5410 struct kvm_pv_cmd *cmd)
5412 struct kvm_s390_pv_dmp dmp;
5416 /* Dump initialization is a prerequisite */
5417 if (!vcpu->kvm->arch.pv.dumping)
5420 if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5423 /* We only handle this subcmd right now */
5424 if (dmp.subcmd != KVM_PV_DUMP_CPU)
5427 /* The CPU dump length equals the length of the cpu storage donation at create time. */
5428 if (dmp.buff_len != uv_info.guest_cpu_stor_len)
5431 data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
5435 ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
5437 VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
5438 vcpu->vcpu_id, cmd->rc, cmd->rrc);
5443 /* On success copy over the dump data */
5444 if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
5451 long kvm_arch_vcpu_ioctl(struct file *filp,
5452 unsigned int ioctl, unsigned long arg)
5454 struct kvm_vcpu *vcpu = filp->private_data;
5455 void __user *argp = (void __user *)arg;
5463 case KVM_S390_STORE_STATUS:
5464 idx = srcu_read_lock(&vcpu->kvm->srcu);
5465 r = kvm_s390_store_status_unloaded(vcpu, arg);
5466 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5468 case KVM_S390_SET_INITIAL_PSW: {
5472 if (copy_from_user(&psw, argp, sizeof(psw)))
5474 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5477 case KVM_S390_CLEAR_RESET:
5479 kvm_arch_vcpu_ioctl_clear_reset(vcpu);
5480 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5481 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5482 UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
5483 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5487 case KVM_S390_INITIAL_RESET:
5489 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
5490 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5491 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5492 UVC_CMD_CPU_RESET_INITIAL,
5494 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5498 case KVM_S390_NORMAL_RESET:
5500 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
5501 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5502 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5503 UVC_CMD_CPU_RESET, &rc, &rrc);
5504 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5508 case KVM_SET_ONE_REG:
5509 case KVM_GET_ONE_REG: {
5510 struct kvm_one_reg reg;
5512 if (kvm_s390_pv_cpu_is_protected(vcpu))
5515 if (copy_from_user(&reg, argp, sizeof(reg)))
5517 if (ioctl == KVM_SET_ONE_REG)
5518 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
5520 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5523 #ifdef CONFIG_KVM_S390_UCONTROL
5524 case KVM_S390_UCAS_MAP: {
5525 struct kvm_s390_ucas_mapping ucasmap;
5527 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5532 if (!kvm_is_ucontrol(vcpu->kvm)) {
5537 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
5538 ucasmap.vcpu_addr, ucasmap.length);
5541 case KVM_S390_UCAS_UNMAP: {
5542 struct kvm_s390_ucas_mapping ucasmap;
5544 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5549 if (!kvm_is_ucontrol(vcpu->kvm)) {
5554 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
5559 case KVM_S390_VCPU_FAULT: {
5560 r = gmap_fault(vcpu->arch.gmap, arg, 0);
5563 case KVM_ENABLE_CAP:
5565 struct kvm_enable_cap cap;
5567 if (copy_from_user(&cap, argp, sizeof(cap)))
5569 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5572 case KVM_S390_MEM_OP: {
5573 struct kvm_s390_mem_op mem_op;
5575 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
5576 r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5581 case KVM_S390_SET_IRQ_STATE: {
5582 struct kvm_s390_irq_state irq_state;
5585 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5587 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5588 irq_state.len == 0 ||
5589 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5593 /* do not use irq_state.flags, it will break old QEMUs */
5594 r = kvm_s390_set_irq_state(vcpu,
5595 (void __user *) irq_state.buf,
5599 case KVM_S390_GET_IRQ_STATE: {
5600 struct kvm_s390_irq_state irq_state;
5603 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5605 if (irq_state.len == 0) {
5609 /* do not use irq_state.flags, it will break old QEMUs */
5610 r = kvm_s390_get_irq_state(vcpu,
5611 (__u8 __user *) irq_state.buf,
5615 case KVM_S390_PV_CPU_COMMAND: {
5616 struct kvm_pv_cmd cmd;
5619 if (!is_prot_virt_host())
5623 if (copy_from_user(&cmd, argp, sizeof(cmd)))
5630 /* We only handle this cmd right now */
5631 if (cmd.cmd != KVM_PV_DUMP)
5634 r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5636 /* Always copy over UV rc / rrc data */
5637 if (copy_to_user((__u8 __user *)argp, &cmd.rc,
5638 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5650 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5652 #ifdef CONFIG_KVM_S390_UCONTROL
5653 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
5654 && (kvm_is_ucontrol(vcpu->kvm))) {
5655 vmf->page = virt_to_page(vcpu->arch.sie_block);
5656 get_page(vmf->page);
5660 return VM_FAULT_SIGBUS;
5663 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5668 /* Section: memory related */
5669 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5670 const struct kvm_memory_slot *old,
5671 struct kvm_memory_slot *new,
5672 enum kvm_mr_change change)
5676 /* When we are protected, we should not change the memory slots */
5677 if (kvm_s390_pv_get_handle(kvm))
5680 if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5682 * A few sanity checks. Memory slots have to start and end at a
5683 * segment boundary (1 MB). The memory in userland may be
5684 * fragmented into various different vmas. It is okay to mmap()
5685 * and munmap() stuff in this slot after doing this call at any time.
5688 if (new->userspace_addr & 0xffffful)
5691 size = new->npages * PAGE_SIZE;
5692 if (size & 0xffffful)
5695 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5699 if (!kvm->arch.migration_mode)
5703 * Turn off migration mode when:
5704 * - userspace creates a new memslot with dirty logging off,
5705 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5706 * dirty logging is turned off.
5707 * Migration mode expects dirty page logging to be enabled to store its data.
5710 if (change != KVM_MR_DELETE &&
5711 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5712 WARN(kvm_s390_vm_stop_migration(kvm),
5713 "Failed to stop migration mode");
5718 void kvm_arch_commit_memory_region(struct kvm *kvm,
5719 struct kvm_memory_slot *old,
5720 const struct kvm_memory_slot *new,
5721 enum kvm_mr_change change)
5727 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5728 old->npages * PAGE_SIZE);
5731 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5732 old->npages * PAGE_SIZE);
5737 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5738 new->base_gfn * PAGE_SIZE,
5739 new->npages * PAGE_SIZE);
5741 case KVM_MR_FLAGS_ONLY:
5744 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5747 pr_warn("failed to commit memory region\n");
5751 static inline unsigned long nonhyp_mask(int i)
5753 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5755 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
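
/*
 * Worked example for nonhyp_mask() (illustrative): the 2-bit hmfai
 * code extracted for facility word i selects how much of that word
 * may be passed through to guests: code 0 -> 0x0000ffffffffffffUL,
 * code 1 -> 0x00000000ffffffffUL, code 2 -> 0x000000000000ffffUL,
 * code 3 -> 0.
 */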
5758 static int __init kvm_s390_init(void)
5762 if (!sclp.has_sief2) {
5763 pr_info("SIE is not available\n");
5767 if (nested && hpage) {
5768 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5772 for (i = 0; i < 16; i++)
5773 kvm_s390_fac_base[i] |=
5774 stfle_fac_list[i] & nonhyp_mask(i);
5776 r = __kvm_s390_init();
5780 r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5788 static void __exit kvm_s390_exit(void)
5795 module_init(kvm_s390_init);
5796 module_exit(kvm_s390_exit);
5799 * Enable autoloading of the kvm module.
5800 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5801 * since x86 takes a different approach.
5803 #include <linux/miscdevice.h>
5804 MODULE_ALIAS_MISCDEV(KVM_MINOR);
5805 MODULE_ALIAS("devname:kvm");