Merge tag 'kvm-s390-next-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d0ff26d..66da278 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -545,6 +545,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_AIS_MIGRATION:
        case KVM_CAP_S390_VCPU_RESETS:
        case KVM_CAP_SET_GUEST_DEBUG:
+       case KVM_CAP_S390_DIAG318:
                r = 1;
                break;
        case KVM_CAP_S390_HPAGE_1M:
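
The new capability lets userspace probe for DIAG 0x318 support before relying on the sync reg added below. A minimal sketch of such a probe (fd setup is assumed; KVM_CHECK_EXTENSION on the /dev/kvm fd returns the r value set here):

        #include <fcntl.h>
        #include <linux/kvm.h>
        #include <sys/ioctl.h>

        /* Returns nonzero iff the kernel advertises DIAG 0x318 handling. */
        static int has_diag318(int kvm_fd)
        {
                return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_DIAG318);
        }
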
@@ -3267,7 +3268,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
-                                   KVM_SYNC_PFAULT;
+                                   KVM_SYNC_PFAULT |
+                                   KVM_SYNC_DIAG318;
        kvm_s390_set_prefix(vcpu, 0);
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
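
KVM_SYNC_DIAG318 exposes the DIAG 0x318 data as a single 64-bit sync reg: the control-program name code (CPNC) in the most significant byte and the control-program version code (CPVC) in the remaining 56 bits. A sketch of the layout as a bitfield, mirroring the diag318_info type used in the hunks below (field names taken from this patch, the exact header definition is assumed):

        struct diag318_info {
                __u64 cpnc : 8;         /* control-program name code */
                __u64 cpvc : 56;        /* control-program version code */
        };
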
@@ -3562,6 +3564,7 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
                vcpu->arch.sie_block->pp = 0;
                vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
                vcpu->arch.sie_block->todpr = 0;
+               vcpu->arch.sie_block->cpnc = 0;
        }
 }
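
The CPNC shadowed in the SIE block is dropped on an initial reset. From userspace this path is reached via the vCPU reset ioctls; a sketch, assuming vcpu_fd is an open vCPU fd (the NORMAL/CLEAR variants require KVM_CAP_S390_VCPU_RESETS, advertised above):

        ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);  /* leaves cpnc untouched */
        ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0); /* clears cpnc as above */
        ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);   /* initial reset plus clearing */
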
 
@@ -3579,6 +3582,7 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
 
        regs->etoken = 0;
        regs->etoken_extension = 0;
+       regs->diag318 = 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -3923,11 +3927,13 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
        }
 }
 
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
 {
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
+
+       return true;
 }
 
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
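
The hook now reports whether the "page not present" notification was actually injected; on s390 the injection is unconditional here, hence the bare return true. In the generic code (virt/kvm/async_pf.c) the result is recorded on the work item, roughly:

        work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

so that the matching "page ready" event is only delivered later if the guest saw the "not present" half first.
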
@@ -3952,33 +3958,31 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
        return true;
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 {
        hva_t hva;
        struct kvm_arch_async_pf arch;
-       int rc;
 
        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
-               return 0;
+               return false;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
-               return 0;
+               return false;
        if (psw_extint_disabled(vcpu))
-               return 0;
+               return false;
        if (kvm_s390_vcpu_has_irq(vcpu, 0))
-               return 0;
+               return false;
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
-               return 0;
+               return false;
        if (!vcpu->arch.gmap->pfault_enabled)
-               return 0;
+               return false;
 
        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
-               return 0;
+               return false;
 
-       rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
-       return rc;
+       return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
 }
 
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
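
The int-to-bool conversion above matches kvm_setup_async_pf(), which now also returns bool, so the tail call no longer needs a temporary. The caller in vcpu_post_run() treats the result as a plain yes/no, roughly:

        if (kvm_arch_setup_async_pf(vcpu))
                return 0;       /* async pf queued, resume the guest */
        return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
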
@@ -4173,8 +4177,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        return rc;
 }
 
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+       struct kvm_run *kvm_run = vcpu->run;
        struct runtime_instr_cb *riccb;
        struct gs_cb *gscb;
 
@@ -4194,6 +4199,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                        kvm_clear_async_pf_completion_queue(vcpu);
        }
+       if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
+               vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
+               vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
+       }
        /*
         * If userspace sets the riccb (e.g. after migration) to a valid state,
         * we should enable RI here instead of doing the lazy enablement.
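
With KVM_SYNC_DIAG318 set in kvm_valid_regs, userspace hands the guest's DIAG 0x318 data to the kernel through the run struct rather than a dedicated ioctl; store_regs_fmt2() below mirrors the current value back after each exit. A sketch of the userspace side (run is the mmap'ed kvm_run of the vCPU, new_diag318 is a placeholder value, e.g. restored after migration):

        run->s.regs.diag318 = new_diag318;
        run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
        ioctl(vcpu_fd, KVM_RUN, 0);
        /* run->s.regs.diag318 now holds the value the guest sees. */
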
@@ -4240,8 +4249,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        /* SIE will load etoken directly from SDNX and therefore kvm_run */
 }
 
-static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs(struct kvm_vcpu *vcpu)
 {
+       struct kvm_run *kvm_run = vcpu->run;
+
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
@@ -4270,7 +4281,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        /* Sync fmt2 only data */
        if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
-               sync_regs_fmt2(vcpu, kvm_run);
+               sync_regs_fmt2(vcpu);
        } else {
                /*
                 * In several places we have to modify our internal view to
@@ -4289,12 +4300,15 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        kvm_run->kvm_dirty_regs = 0;
 }
 
-static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+       struct kvm_run *kvm_run = vcpu->run;
+
        kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
        kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
        kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
        kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
+       kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
        if (MACHINE_HAS_GS) {
                __ctl_set_bit(2, 4);
                if (vcpu->arch.gs_enabled)
@@ -4310,8 +4324,10 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        /* SIE will save etoken directly into SDNX and therefore kvm_run */
 }
 
-static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs(struct kvm_vcpu *vcpu)
 {
+       struct kvm_run *kvm_run = vcpu->run;
+
        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
@@ -4330,7 +4346,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
        current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
        if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
-               store_regs_fmt2(vcpu, kvm_run);
+               store_regs_fmt2(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@@ -4368,7 +4384,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                goto out;
        }
 
-       sync_regs(vcpu, kvm_run);
+       sync_regs(vcpu);
        enable_cpu_timer_accounting(vcpu);
 
        might_fault();
@@ -4390,7 +4406,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        }
 
        disable_cpu_timer_accounting(vcpu);
-       store_regs(vcpu, kvm_run);
+       store_regs(vcpu);
 
        kvm_sigset_deactivate(vcpu);