2 * s390host.c -- hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008,2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/system.h>
/*
 * NOTE(review): this extract is lossy -- original-file line numbers are fused
 * into each line and some lines (e.g. the array's closing "};") are missing.
 * Comments describe only the visible code.
 */
/* Expand to the (offset, type) pair expected by struct kvm_stats_debugfs_item. */
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* Per-vcpu statistics exported via debugfs; each name maps to a kvm_vcpu.stat field. */
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
38 { "exit_null", VCPU_STAT(exit_null) },
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
75 { "diagnose_10", VCPU_STAT(diagnose_10) },
76 { "diagnose_44", VCPU_STAT(diagnose_44) },
/* NOTE(review): the closing "};" of this array is not visible in this extract. */
/* Host facility-list buffer (one zeroed page) written in kvm_s390_init(),
 * freed in kvm_s390_exit(), and handed to each vcpu's SIE block. */
80 static unsigned long long *facilities;
82 /* Section: not file related */
/*
 * NOTE(review): the stub function bodies below are incomplete in this extract
 * (braces and return statements missing). SIE needs no explicit hardware
 * enable/disable, so these arch hooks appear to be no-ops or near no-ops.
 */
83 int kvm_arch_hardware_enable(void *garbage)
85 /* every s390 is virtualization enabled ;-) */
89 void kvm_arch_hardware_disable(void *garbage)
93 int kvm_arch_hardware_setup(void)
98 void kvm_arch_hardware_unsetup(void)
102 void kvm_arch_check_processor_compat(void *rtn)
106 int kvm_arch_init(void *opaque)
111 void kvm_arch_exit(void)
115 /* Section: device related */
/* /dev/kvm ioctl: only KVM_S390_ENABLE_SIE is handled here, by switching the
 * calling process's mm to a SIE-capable page table layout. */
116 long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
/* Capability query; the listed caps are advertised as supported.
 * NOTE(review): surrounding switch/return lines are missing from the extract. */
124 int kvm_dev_ioctl_check_extension(long ext)
129 case KVM_CAP_S390_PSW:
130 case KVM_CAP_S390_GMAP:
131 case KVM_CAP_SYNC_MMU:
140 /* Section: vm related */
142 * Get (and clear) the dirty memory log for a memory slot.
/* NOTE(review): body not visible here -- presumably unsupported on this arch. */
144 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
145 struct kvm_dirty_log *log)
/*
 * VM-level ioctl dispatcher. Visible case: KVM_S390_INTERRUPT copies a
 * kvm_s390_interrupt from userspace and injects it as a floating interrupt.
 * NOTE(review): switch header, error paths and return are missing from the
 * extract.
 */
150 long kvm_arch_vm_ioctl(struct file *filp,
151 unsigned int ioctl, unsigned long arg)
153 struct kvm *kvm = filp->private_data;
154 void __user *argp = (void __user *)arg;
158 case KVM_S390_INTERRUPT: {
159 struct kvm_s390_interrupt s390int;
162 if (copy_from_user(&s390int, argp, sizeof(s390int)))
164 r = kvm_s390_inject_vm(kvm, &s390int);
/*
 * Create arch-specific VM state: validate the (ucontrol) type flags, enable
 * SIE for the current mm, allocate the SCA page and a debug feature area,
 * and set up the floating-interrupt list. For ucontrol VMs no kernel gmap
 * is created; otherwise a gmap for current->mm is allocated.
 * NOTE(review): error-path labels, returns and some checks are missing from
 * this extract; the trailing debug_unregister/free_page lines look like the
 * unwind path.
 */
174 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
180 #ifdef CONFIG_KVM_S390_UCONTROL
/* reject unknown type bits; ucontrol VMs additionally require CAP_SYS_ADMIN */
181 if (type & ~KVM_VM_S390_UCONTROL)
183 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
190 rc = s390_enable_sie();
196 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
200 sprintf(debug_name, "kvm-%u", current->pid);
202 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
206 spin_lock_init(&kvm->arch.float_int.lock);
207 INIT_LIST_HEAD(&kvm->arch.float_int.list);
209 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
210 VM_EVENT(kvm, 3, "%s", "vm created");
212 if (type & KVM_VM_S390_UCONTROL) {
213 kvm->arch.gmap = NULL;
215 kvm->arch.gmap = gmap_alloc(current->mm);
/* error unwind (labels not visible in extract) */
221 debug_unregister(kvm->arch.dbf);
223 free_page((unsigned long)(kvm->arch.sca));
/*
 * Tear down one vcpu: clear its bit in the SCA cpu mask, detach its SIE
 * block from the SCA slot (only if still pointing at this vcpu), free the
 * per-vcpu gmap for ucontrol guests, then free the SIE block page and
 * uninit the common vcpu state.
 * NOTE(review): braces and a trailing kfree/free of the vcpu struct are
 * missing from this extract.
 */
228 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
230 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
/* mcn is a big-endian bitmask, hence the 63 - id bit numbering */
231 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
232 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
233 (__u64) vcpu->arch.sie_block)
234 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
237 if (kvm_is_ucontrol(vcpu->kvm))
238 gmap_free(vcpu->arch.gmap);
240 free_page((unsigned long)(vcpu->arch.sie_block));
241 kvm_vcpu_uninit(vcpu);
/*
 * Destroy every vcpu of a VM, then clear kvm->vcpus[] and the online
 * counter under kvm->lock so concurrent lookups see a consistent state.
 */
245 static void kvm_free_vcpus(struct kvm *kvm)
248 struct kvm_vcpu *vcpu;
250 kvm_for_each_vcpu(i, vcpu, kvm)
251 kvm_arch_vcpu_destroy(vcpu);
253 mutex_lock(&kvm->lock);
254 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
255 kvm->vcpus[i] = NULL;
257 atomic_set(&kvm->online_vcpus, 0);
258 mutex_unlock(&kvm->lock);
/* No arch-specific event syncing needed on s390 (body not visible). */
261 void kvm_arch_sync_events(struct kvm *kvm)
/*
 * Release VM-wide arch resources: the SCA page, the debug feature area,
 * and (for non-ucontrol guests) the kernel-owned gmap.
 * NOTE(review): a kvm_free_vcpus() call likely precedes these lines but is
 * missing from this extract.
 */
265 void kvm_arch_destroy_vm(struct kvm *kvm)
268 free_page((unsigned long)(kvm->arch.sca));
269 debug_unregister(kvm->arch.dbf);
270 if (!kvm_is_ucontrol(kvm))
271 gmap_free(kvm->arch.gmap);
274 /* Section: vcpu related */
/*
 * Per-vcpu init: ucontrol guests get their own gmap per vcpu; normal guests
 * share the VM-wide gmap. NOTE(review): error return on failed gmap_alloc
 * and the function's return are missing from this extract.
 */
275 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
277 if (kvm_is_ucontrol(vcpu->kvm)) {
278 vcpu->arch.gmap = gmap_alloc(current->mm);
279 if (!vcpu->arch.gmap)
284 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
/* Nothing arch-specific to undo here (body not visible). */
288 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
/*
 * Scheduled in: save host FP/access registers, load the guest's (masking
 * the FPC to valid bits first), enable the guest address space and mark
 * the vcpu RUNNING in its SIE control block.
 */
293 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
295 save_fp_regs(&vcpu->arch.host_fpregs);
296 save_access_regs(vcpu->arch.host_acrs);
297 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
298 restore_fp_regs(&vcpu->arch.guest_fpregs);
299 restore_access_regs(vcpu->arch.guest_acrs);
300 gmap_enable(vcpu->arch.gmap);
301 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
/*
 * Scheduled out: exact mirror of kvm_arch_vcpu_load() -- clear RUNNING,
 * disable the guest address space, stash guest registers and restore the
 * host's.
 */
304 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
306 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
307 gmap_disable(vcpu->arch.gmap);
308 save_fp_regs(&vcpu->arch.guest_fpregs);
309 save_access_regs(vcpu->arch.guest_acrs);
310 restore_fp_regs(&vcpu->arch.host_fpregs);
311 restore_access_regs(vcpu->arch.host_acrs);
/*
 * Reset the vcpu's architected state in its SIE control block: PSW, prefix,
 * CPU timer, clock comparator, TOD programmable register and all control
 * registers are zeroed, then CR0/CR14 get their architected reset values.
 */
314 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
316 /* this equals initial cpu reset in pop, but we don't switch to ESA */
317 vcpu->arch.sie_block->gpsw.mask = 0UL;
318 vcpu->arch.sie_block->gpsw.addr = 0UL;
319 vcpu->arch.sie_block->prefix = 0UL;
320 vcpu->arch.sie_block->ihcpu = 0xffff;
321 vcpu->arch.sie_block->cputm = 0UL;
322 vcpu->arch.sie_block->ckc = 0UL;
323 vcpu->arch.sie_block->todpr = 0;
324 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
325 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
326 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
/* reset the guest FPC and load the zeroed value into the hardware FPC */
327 vcpu->arch.guest_fpregs.fpc = 0;
328 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
329 vcpu->arch.sie_block->gbea = 1;
/*
 * One-time vcpu setup: program the SIE control block's flags, execution
 * controls and facility-list pointer, and arm the clock-comparator hrtimer
 * plus its wakeup tasklet.
 * NOTE(review): the rest of the cpuflags OR-list (line 334 continues) and
 * the return are missing from this extract.
 */
332 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
334 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
337 vcpu->arch.sie_block->ecb = 6;
338 vcpu->arch.sie_block->eca = 0xC1002001U;
/* SIE wants a 31-bit address of the facility list page */
339 vcpu->arch.sie_block->fac = (int) (long) facilities;
340 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
341 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
342 (unsigned long) vcpu);
343 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
/* use the host's CPU id but override the version byte */
344 get_cpu_id(&vcpu->arch.cpu_id);
345 vcpu->arch.cpu_id.version = 0xff;
/*
 * Allocate and wire up a new vcpu: kzalloc the kvm_vcpu, get a zeroed page
 * for the SIE control block, register it in the VM's SCA (slot + high/low
 * address halves + cpu-mask bit), initialise the local interrupt structure
 * and publish it in the floating-interrupt table under its lock, then run
 * common kvm_vcpu_init().
 * NOTE(review): error labels, ERR_PTR returns and the closing brace are
 * missing from this extract; the trailing free_page is the unwind path.
 */
349 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
352 struct kvm_vcpu *vcpu;
355 if (id >= KVM_MAX_VCPUS)
360 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL)
364 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
365 get_zeroed_page(GFP_KERNEL);
367 if (!vcpu->arch.sie_block)
370 vcpu->arch.sie_block->icpua = id;
371 BUG_ON(!kvm->arch.sca);
/* claim the SCA slot only if it is still free */
372 if (!kvm->arch.sca->cpu[id].sda)
373 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
374 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
375 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
376 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
378 spin_lock_init(&vcpu->arch.local_int.lock);
379 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
380 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
381 spin_lock(&kvm->arch.float_int.lock);
382 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
383 init_waitqueue_head(&vcpu->arch.local_int.wq);
384 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
385 spin_unlock(&kvm->arch.float_int.lock);
387 rc = kvm_vcpu_init(vcpu, kvm, id);
389 goto out_free_sie_block;
390 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
391 vcpu->arch.sie_block);
/* out_free_sie_block unwind (label not visible in extract) */
395 free_page((unsigned long)(vcpu->arch.sie_block));
/* Required by common KVM code but unused on s390 (see comment below). */
402 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
404 /* kvm common code refers to this, but never calls it */
/* KVM_S390_INITIAL_RESET ioctl backend: delegate to the reset helper.
 * NOTE(review): the return statement is missing from this extract. */
409 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
411 kvm_s390_vcpu_initial_reset(vcpu);
415 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
417 memcpy(&vcpu->arch.guest_gprs, ®s->gprs, sizeof(regs->gprs));
421 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
423 memcpy(®s->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
/*
 * KVM_SET_SREGS backend: install access registers and control registers;
 * access registers are also loaded into hardware immediately.
 * NOTE(review): braces and "return 0" lines are missing from this extract
 * for all four accessors below.
 */
427 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
428 struct kvm_sregs *sregs)
430 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
431 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
432 restore_access_regs(vcpu->arch.guest_acrs);
/* KVM_GET_SREGS backend: read back access and control registers. */
436 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
437 struct kvm_sregs *sregs)
439 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
440 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
/* KVM_SET_FPU backend: install FP registers and FPC, load into hardware. */
444 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
446 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
447 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
448 restore_fp_regs(&vcpu->arch.guest_fpregs);
/* KVM_GET_FPU backend: read back FP registers and FPC. */
452 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
454 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
455 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
/*
 * KVM_S390_SET_INITIAL_PSW backend: store the new PSW into the shared
 * kvm_run area, but only while the vcpu is STOPPED.
 * NOTE(review): the error return for the non-stopped case is missing from
 * this extract.
 */
459 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
463 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
466 vcpu->run->psw_mask = psw.mask;
467 vcpu->run->psw_addr = psw.addr;
/* The following four vcpu ioctls are not supported on s390. */
472 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
473 struct kvm_translation *tr)
475 return -EINVAL; /* not implemented yet */
478 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
479 struct kvm_guest_debug *dbg)
481 return -EINVAL; /* not implemented yet */
484 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
485 struct kvm_mp_state *mp_state)
487 return -EINVAL; /* not implemented yet */
490 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
491 struct kvm_mp_state *mp_state)
493 return -EINVAL; /* not implemented yet */
/*
 * Run the guest once via the SIE instruction: sync r14/r15 into the SIE
 * block, deliver pending interrupts, enter SIE, and on a host fault during
 * SIE inject an addressing exception into the guest. r14/r15 are synced
 * back afterwards.
 * NOTE(review): braces, local_irq handling and guest-enter/exit bookkeeping
 * lines are missing from this extract.
 */
496 static void __vcpu_run(struct kvm_vcpu *vcpu)
498 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
503 if (test_thread_flag(TIF_MCCK_PENDING))
506 kvm_s390_deliver_pending_interrupts(vcpu);
508 vcpu->arch.sie_block->icptcode = 0;
512 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
513 atomic_read(&vcpu->arch.sie_block->cpuflags));
/* non-zero return from sie64a means the host faulted while in SIE */
514 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
515 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
516 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
518 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
519 vcpu->arch.sie_block->icptcode);
524 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
/*
 * KVM_RUN backend: apply the caller's signal mask, clear STOPPED, seed the
 * guest PSW from kvm_run, then loop running SIE and handling intercepts
 * until an intercept needs userspace or a signal arrives. On exit the
 * reason and (for SIEIC) the intercept details are written back to kvm_run
 * along with the current PSW, and the signal mask is restored.
 * NOTE(review): the do/while loop header, several returns and braces are
 * missing from this extract.
 */
527 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
533 if (vcpu->sigset_active)
534 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
536 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
538 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
540 switch (kvm_run->exit_reason) {
541 case KVM_EXIT_S390_SIEIC:
542 case KVM_EXIT_UNKNOWN:
544 case KVM_EXIT_S390_RESET:
550 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
551 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
557 rc = kvm_handle_sie_intercept(vcpu);
558 } while (!signal_pending(current) && !rc);
560 if (rc == SIE_INTERCEPT_RERUNVCPU)
563 if (signal_pending(current) && !rc) {
564 kvm_run->exit_reason = KVM_EXIT_INTR;
568 if (rc == -EOPNOTSUPP) {
569 /* intercept cannot be handled in-kernel, prepare kvm-run */
570 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
571 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
572 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
573 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
577 if (rc == -EREMOTE) {
578 /* intercept was handled, but userspace support is needed
579 * kvm_run has been prepared by the handler */
583 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
584 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
586 if (vcpu->sigset_active)
587 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
589 vcpu->stat.exit_userspace++;
/*
 * Copy @n bytes from kernel @from to guest address @guestdest; @prefix
 * selects prefixed (virtual, copy_to_guest) vs absolute addressing.
 * NOTE(review): the if/else wrapping these two returns is missing from
 * this extract.
 */
593 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
594 unsigned long n, int prefix)
597 return copy_to_guest(vcpu, guestdest, from, n);
599 return copy_to_guest_absolute(vcpu, guestdest, from, n);
603 * store status at address
604 * we have two special cases:
605 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
606 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
/*
 * Write the vcpu's architected register state into the guest save area at
 * @addr, field by field (FP regs, GPRs, PSW, prefix, FPC, TOD programmable
 * reg, CPU timer, clock comparator, access regs, control regs). Byte 163
 * gets the archmode flag for the two special addresses.
 * NOTE(review): "return -EFAULT" lines after each failed __guestcopy and
 * the final "return 0" are missing from this extract.
 */
608 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
610 unsigned char archmode = 1;
613 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
614 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
616 addr = SAVE_AREA_BASE;
618 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
619 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
621 addr = SAVE_AREA_BASE;
626 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
627 vcpu->arch.guest_fpregs.fprs, 128, prefix))
630 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
631 vcpu->arch.guest_gprs, 128, prefix))
634 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
635 &vcpu->arch.sie_block->gpsw, 16, prefix))
638 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
639 &vcpu->arch.sie_block->prefix, 4, prefix))
642 if (__guestcopy(vcpu,
643 addr + offsetof(struct save_area, fp_ctrl_reg),
644 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
647 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
648 &vcpu->arch.sie_block->todpr, 4, prefix))
651 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
652 &vcpu->arch.sie_block->cputm, 8, prefix))
655 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
656 &vcpu->arch.sie_block->ckc, 8, prefix))
659 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
660 &vcpu->arch.guest_acrs, 64, prefix))
663 if (__guestcopy(vcpu,
664 addr + offsetof(struct save_area, ctrl_regs),
665 &vcpu->arch.sie_block->gcr, 128, prefix))
/*
 * Per-vcpu ioctl dispatcher: interrupt injection, store-status, initial
 * PSW/reset, and (ucontrol only) UCAS map/unmap of user address ranges
 * into the guest address space.
 * NOTE(review): switch header, break statements, error assignments and the
 * final return are missing from this extract.
 */
670 long kvm_arch_vcpu_ioctl(struct file *filp,
671 unsigned int ioctl, unsigned long arg)
673 struct kvm_vcpu *vcpu = filp->private_data;
674 void __user *argp = (void __user *)arg;
678 case KVM_S390_INTERRUPT: {
679 struct kvm_s390_interrupt s390int;
682 if (copy_from_user(&s390int, argp, sizeof(s390int)))
684 r = kvm_s390_inject_vcpu(vcpu, &s390int);
/* for STORE_STATUS the ioctl argument is the guest address itself */
687 case KVM_S390_STORE_STATUS:
688 r = kvm_s390_vcpu_store_status(vcpu, arg);
690 case KVM_S390_SET_INITIAL_PSW: {
694 if (copy_from_user(&psw, argp, sizeof(psw)))
696 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
699 case KVM_S390_INITIAL_RESET:
700 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
702 #ifdef CONFIG_KVM_S390_UCONTROL
703 case KVM_S390_UCAS_MAP: {
704 struct kvm_s390_ucas_mapping ucasmap;
706 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
/* UCAS ioctls are only valid on user-controlled VMs */
711 if (!kvm_is_ucontrol(vcpu->kvm)) {
716 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
717 ucasmap.vcpu_addr, ucasmap.length);
720 case KVM_S390_UCAS_UNMAP: {
721 struct kvm_s390_ucas_mapping ucasmap;
723 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
728 if (!kvm_is_ucontrol(vcpu->kvm)) {
733 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
744 /* Section: memory related */
/*
 * Validate a memory-slot update: guest physical base must be 0 and both
 * userspace address and size must be 1MB-segment aligned (low 20 bits
 * clear). NOTE(review): the -EINVAL returns and "return 0" are missing
 * from this extract.
 */
745 int kvm_arch_prepare_memory_region(struct kvm *kvm,
746 struct kvm_memory_slot *memslot,
747 struct kvm_memory_slot old,
748 struct kvm_userspace_memory_region *mem,
751 /* A few sanity checks. We can have exactly one memory slot which has
752 to start at guest virtual zero and which has to be located at a
753 page boundary in userland and which has to end at a page boundary.
754 The memory in userland is ok to be fragmented into various different
755 vmas. It is okay to mmap() and munmap() stuff in this slot after
756 doing this call at any time */
761 if (mem->guest_phys_addr)
764 if (mem->userspace_addr & 0xffffful)
767 if (mem->memory_size & 0xffffful)
/*
 * Apply the validated slot: map the userspace range into the VM's gmap.
 * Failure is only logged -- the slot change has already been committed by
 * common code at this point.
 */
776 void kvm_arch_commit_memory_region(struct kvm *kvm,
777 struct kvm_userspace_memory_region *mem,
778 struct kvm_memory_slot old,
784 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
785 mem->guest_phys_addr, mem->memory_size);
787 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
/* No shadow page tables to flush on s390 (body not visible). */
791 void kvm_arch_flush_shadow(struct kvm *kvm)
/*
 * Module init: register with common KVM, then build the facility list the
 * guests will see -- a DMA-able zeroed page seeded from the host's STFLE
 * list and masked down to the facilities KVM actually supports.
 * NOTE(review): error handling after kvm_init/get_zeroed_page and the
 * return are missing from this extract.
 */
795 static int __init kvm_s390_init(void)
798 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
803 * guests can ask for up to 255+1 double words, we need a full page
804 * to hold the maximum amount of facilities. On the other hand, we
805 * only set facilities that are known to work in KVM.
807 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
812 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
/* whitelist masks: keep only facility bits validated for KVM guests */
813 facilities[0] &= 0xff00fff3f47c0000ULL;
814 facilities[1] &= 0x201c000000000000ULL;
/* Module exit: release the facility page (kvm_exit presumably follows,
 * not visible in this extract). */
818 static void __exit kvm_s390_exit(void)
820 free_page((unsigned long) facilities);
824 module_init(kvm_s390_init);
825 module_exit(kvm_s390_exit);