Merge remote-tracking branch 's390/guarded-storage' into kvms390/next
author Christian Borntraeger <borntraeger@de.ibm.com>
Wed, 22 Mar 2017 17:54:52 +0000 (18:54 +0100)
committer Christian Borntraeger <borntraeger@de.ibm.com>
Wed, 22 Mar 2017 17:54:52 +0000 (18:54 +0100)
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/gaccess.c
arch/s390/kvm/intercept.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/kvm/sthyi.c
arch/s390/kvm/vsie.c
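
The bulk of the merged diff replaces raw hex masks on the SIE control block
fields (eca, ecb, ecb2, ecb3, ecd, icptcode) with named bit constants defined
in kvm_host.h. A minimal sketch of that pattern follows; the struct and
helpers are simplified stand-ins for illustration only, not the kernel's
actual definitions.

    /* Illustrative sketch only: shows the magic-number -> named-constant
     * pattern from the merged patches. ECB_TE and ECB3_RI match the values
     * added to kvm_host.h; the surrounding struct is a simplified assumption.
     */
    #define ECB_TE   0x10    /* transactional execution */
    #define ECB3_RI  0x01    /* run-time instrumentation */

    struct sie_block_sketch {
            unsigned char ecb;
            unsigned char ecb3;
    };

    static inline void enable_ri_sketch(struct sie_block_sketch *sb)
    {
            sb->ecb3 |= ECB3_RI;              /* was: sb->ecb3 |= 0x01; */
    }

    static inline int te_enabled_sketch(const struct sie_block_sketch *sb)
    {
            return (sb->ecb & ECB_TE) != 0;   /* was: sb->ecb & 0x10 */
    }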

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a41faf3..54e36e7 100644
@@ -164,11 +164,21 @@ struct kvm_s390_sie_block {
 #define ICTL_RRBE      0x00001000
 #define ICTL_TPROT     0x00000200
        __u32   ictl;                   /* 0x0048 */
+#define ECA_CEI                0x80000000
+#define ECA_IB         0x40000000
+#define ECA_SIGPI      0x10000000
+#define ECA_MVPGI      0x01000000
+#define ECA_VX         0x00020000
+#define ECA_PROTEXCI   0x00002000
+#define ECA_SII                0x00000001
        __u32   eca;                    /* 0x004c */
 #define ICPT_INST      0x04
 #define ICPT_PROGI     0x08
 #define ICPT_INSTPROGI 0x0C
+#define ICPT_EXTREQ    0x10
 #define ICPT_EXTINT    0x14
+#define ICPT_IOREQ     0x18
+#define ICPT_WAIT      0x1c
 #define ICPT_VALIDITY  0x20
 #define ICPT_STOP      0x28
 #define ICPT_OPEREXC   0x2C
@@ -182,10 +192,18 @@ struct kvm_s390_sie_block {
        __u32   ipb;                    /* 0x0058 */
        __u32   scaoh;                  /* 0x005c */
        __u8    reserved60;             /* 0x0060 */
+#define ECB_TE         0x10
+#define ECB_SRSI       0x04
+#define ECB_HOSTPROTINT        0x02
        __u8    ecb;                    /* 0x0061 */
+#define ECB2_CMMA      0x80
+#define ECB2_IEP       0x20
+#define ECB2_PFMFI     0x08
+#define ECB2_ESCA      0x04
        __u8    ecb2;                   /* 0x0062 */
-#define ECB3_AES 0x04
 #define ECB3_DEA 0x08
+#define ECB3_AES 0x04
+#define ECB3_RI  0x01
        __u8    ecb3;                   /* 0x0063 */
        __u32   scaol;                  /* 0x0064 */
        __u8    reserved68[4];          /* 0x0068 */
@@ -224,6 +242,7 @@ struct kvm_s390_sie_block {
        __u8    reserved1a4[20];        /* 0x01a4 */
        __u64   cbrlo;                  /* 0x01b8 */
        __u8    reserved1c0[8];         /* 0x01c0 */
+#define ECD_HOSTREGMGMT        0x20000000
        __u32   ecd;                    /* 0x01c8 */
        __u8    reserved1cc[18];        /* 0x01cc */
        __u64   pp;                     /* 0x01de */
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d55c829..709aca9 100644
@@ -262,7 +262,7 @@ struct aste {
 
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.sie_block->eca & 1) {
+       if (vcpu->arch.sie_block->eca & ECA_SII) {
                int rc;
 
                read_lock(&vcpu->kvm->arch.sca_lock);
@@ -361,7 +361,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 
 void ipte_lock(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.sie_block->eca & 1)
+       if (vcpu->arch.sie_block->eca & ECA_SII)
                ipte_lock_siif(vcpu);
        else
                ipte_lock_simple(vcpu);
@@ -369,7 +369,7 @@ void ipte_lock(struct kvm_vcpu *vcpu)
 
 void ipte_unlock(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.sie_block->eca & 1)
+       if (vcpu->arch.sie_block->eca & ECA_SII)
                ipte_unlock_siif(vcpu);
        else
                ipte_unlock_simple(vcpu);
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 59920f9..e831f4b 100644
@@ -368,8 +368,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
        trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
                                      vcpu->arch.sie_block->ipb);
 
-       if (vcpu->arch.sie_block->ipa == 0xb256 &&
-           test_kvm_facility(vcpu->kvm, 74))
+       if (vcpu->arch.sie_block->ipa == 0xb256)
                return handle_sthyi(vcpu);
 
        if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
@@ -404,26 +403,26 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
                return -EOPNOTSUPP;
 
        switch (vcpu->arch.sie_block->icptcode) {
-       case 0x10:
-       case 0x18:
+       case ICPT_EXTREQ:
+       case ICPT_IOREQ:
                return handle_noop(vcpu);
-       case 0x04:
+       case ICPT_INST:
                rc = handle_instruction(vcpu);
                break;
-       case 0x08:
+       case ICPT_PROGI:
                return handle_prog(vcpu);
-       case 0x14:
+       case ICPT_EXTINT:
                return handle_external_interrupt(vcpu);
-       case 0x1c:
+       case ICPT_WAIT:
                return kvm_s390_handle_wait(vcpu);
-       case 0x20:
+       case ICPT_VALIDITY:
                return handle_validity(vcpu);
-       case 0x28:
+       case ICPT_STOP:
                return handle_stop(vcpu);
-       case 0x2c:
+       case ICPT_OPEREXC:
                rc = handle_operexc(vcpu);
                break;
-       case 0x38:
+       case ICPT_PARTEXEC:
                rc = handle_partial_execution(vcpu);
                break;
        default:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index fd6cd05..976373c 100644
@@ -1646,7 +1646,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
                sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
-               vcpu->arch.sie_block->ecb2 |= 0x04U;
+               vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
                set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
@@ -1700,7 +1700,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
        kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
                vcpu->arch.sie_block->scaoh = scaoh;
                vcpu->arch.sie_block->scaol = scaol;
-               vcpu->arch.sie_block->ecb2 |= 0x04U;
+               vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
        }
        kvm->arch.sca = new_sca;
        kvm->arch.use_esca = 1;
@@ -1939,8 +1939,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;
 
-       vcpu->arch.sie_block->ecb2 |= 0x80;
-       vcpu->arch.sie_block->ecb2 &= ~0x08;
+       vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
+       vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
        return 0;
 }
 
@@ -1970,28 +1970,28 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
        /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
        if (MACHINE_HAS_ESOP)
-               vcpu->arch.sie_block->ecb |= 0x02;
+               vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
        if (test_kvm_facility(vcpu->kvm, 9))
-               vcpu->arch.sie_block->ecb |= 0x04;
+               vcpu->arch.sie_block->ecb |= ECB_SRSI;
        if (test_kvm_facility(vcpu->kvm, 73))
-               vcpu->arch.sie_block->ecb |= 0x10;
+               vcpu->arch.sie_block->ecb |= ECB_TE;
 
        if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
-               vcpu->arch.sie_block->ecb2 |= 0x08;
+               vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
        if (test_kvm_facility(vcpu->kvm, 130))
-               vcpu->arch.sie_block->ecb2 |= 0x20;
-       vcpu->arch.sie_block->eca = 0x1002000U;
+               vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
+       vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
        if (sclp.has_cei)
-               vcpu->arch.sie_block->eca |= 0x80000000U;
+               vcpu->arch.sie_block->eca |= ECA_CEI;
        if (sclp.has_ib)
-               vcpu->arch.sie_block->eca |= 0x40000000U;
+               vcpu->arch.sie_block->eca |= ECA_IB;
        if (sclp.has_siif)
-               vcpu->arch.sie_block->eca |= 1;
+               vcpu->arch.sie_block->eca |= ECA_SII;
        if (sclp.has_sigpif)
-               vcpu->arch.sie_block->eca |= 0x10000000U;
+               vcpu->arch.sie_block->eca |= ECA_SIGPI;
        if (test_kvm_facility(vcpu->kvm, 129)) {
-               vcpu->arch.sie_block->eca |= 0x00020000;
-               vcpu->arch.sie_block->ecd |= 0x20000000;
+               vcpu->arch.sie_block->eca |= ECA_VX;
+               vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
        }
        vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
@@ -2719,6 +2719,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 
 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+       struct runtime_instr_cb *riccb;
+
+       riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
@@ -2747,12 +2750,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         * we should enable RI here instead of doing the lazy enablement.
         */
        if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
-           test_kvm_facility(vcpu->kvm, 64)) {
-               struct runtime_instr_cb *riccb =
-                       (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
-
-               if (riccb->valid)
-                       vcpu->arch.sie_block->ecb3 |= 0x01;
+           test_kvm_facility(vcpu->kvm, 64) &&
+           riccb->valid &&
+           !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
+               VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
+               vcpu->arch.sie_block->ecb3 |= ECB3_RI;
        }
        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index af9fa91..dfdcde1 100644
@@ -25,7 +25,7 @@
 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 
 /* Transactional Memory Execution related macros */
-#define IS_TE_ENABLED(vcpu)    ((vcpu->arch.sie_block->ecb & 0x10))
+#define IS_TE_ENABLED(vcpu)    ((vcpu->arch.sie_block->ecb & ECB_TE))
 #define TDB_FORMAT1            1
 #define IS_ITDB_VALID(vcpu)    ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 64b6a30..d42bb2d 100644
@@ -37,7 +37,8 @@
 static int handle_ri(struct kvm_vcpu *vcpu)
 {
        if (test_kvm_facility(vcpu->kvm, 64)) {
-               vcpu->arch.sie_block->ecb3 |= 0x01;
+               VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
+               vcpu->arch.sie_block->ecb3 |= ECB3_RI;
                kvm_s390_retry_instr(vcpu);
                return 0;
        } else
@@ -759,6 +760,7 @@ static const intercept_handler_t b2_handlers[256] = {
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x50] = handle_ipte_interlock,
+       [0x56] = handle_sthyi,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index 05c98bb..926b524 100644
@@ -404,6 +404,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
        u64 code, addr, cc = 0;
        struct sthyi_sctns *sctns = NULL;
 
+       if (!test_kvm_facility(vcpu->kvm, 74))
+               return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
        /*
         * STHYI requires extensive locking in the higher hypervisors
         * and is very computational/memory expensive. Therefore we
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 5491be3..d91f1df 100644
@@ -249,7 +249,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
        struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
-       bool had_tx = scb_s->ecb & 0x10U;
+       bool had_tx = scb_s->ecb & ECB_TE;
        unsigned long new_mso = 0;
        int rc;
 
@@ -307,34 +307,34 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                scb_s->ihcpu = scb_o->ihcpu;
 
        /* MVPG and Protection Exception Interpretation are always available */
-       scb_s->eca |= scb_o->eca & 0x01002000U;
+       scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
        /* Host-protection-interruption introduced with ESOP */
        if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
-               scb_s->ecb |= scb_o->ecb & 0x02U;
+               scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
        /* transactional execution */
        if (test_kvm_facility(vcpu->kvm, 73)) {
                /* remap the prefix is tx is toggled on */
-               if ((scb_o->ecb & 0x10U) && !had_tx)
+               if ((scb_o->ecb & ECB_TE) && !had_tx)
                        prefix_unmapped(vsie_page);
-               scb_s->ecb |= scb_o->ecb & 0x10U;
+               scb_s->ecb |= scb_o->ecb & ECB_TE;
        }
        /* SIMD */
        if (test_kvm_facility(vcpu->kvm, 129)) {
-               scb_s->eca |= scb_o->eca & 0x00020000U;
-               scb_s->ecd |= scb_o->ecd & 0x20000000U;
+               scb_s->eca |= scb_o->eca & ECA_VX;
+               scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
        }
        /* Run-time-Instrumentation */
        if (test_kvm_facility(vcpu->kvm, 64))
-               scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
+               scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
        /* Instruction Execution Prevention */
        if (test_kvm_facility(vcpu->kvm, 130))
-               scb_s->ecb2 |= scb_o->ecb2 & 0x20U;
+               scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
        if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
-               scb_s->eca |= scb_o->eca & 0x00000001U;
+               scb_s->eca |= scb_o->eca & ECA_SII;
        if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
-               scb_s->eca |= scb_o->eca & 0x40000000U;
+               scb_s->eca |= scb_o->eca & ECA_IB;
        if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
-               scb_s->eca |= scb_o->eca & 0x80000000U;
+               scb_s->eca |= scb_o->eca & ECA_CEI;
 
        prepare_ibc(vcpu, vsie_page);
        rc = shadow_crycb(vcpu, vsie_page);
@@ -406,7 +406,7 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        prefix += scb_s->mso;
 
        rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
-       if (!rc && (scb_s->ecb & 0x10U))
+       if (!rc && (scb_s->ecb & ECB_TE))
                rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
                                           prefix + PAGE_SIZE);
        /*
@@ -543,7 +543,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        }
 
        gpa = scb_o->itdba & ~0xffUL;
-       if (gpa && (scb_s->ecb & 0x10U)) {
+       if (gpa && (scb_s->ecb & ECB_TE)) {
                if (!(gpa & ~0x1fffU)) {
                        rc = set_validity_icpt(scb_s, 0x0080U);
                        goto unpin;
@@ -558,8 +558,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        }
 
        gpa = scb_o->gvrd & ~0x1ffUL;
-       if (gpa && (scb_s->eca & 0x00020000U) &&
-           !(scb_s->ecd & 0x20000000U)) {
+       if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
                if (!(gpa & ~0x1fffUL)) {
                        rc = set_validity_icpt(scb_s, 0x1310U);
                        goto unpin;
@@ -577,7 +576,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        }
 
        gpa = scb_o->riccbd & ~0x3fUL;
-       if (gpa && (scb_s->ecb3 & 0x01U)) {
+       if (gpa && (scb_s->ecb3 & ECB3_RI)) {
                if (!(gpa & ~0x1fffUL)) {
                        rc = set_validity_icpt(scb_s, 0x0043U);
                        goto unpin;