diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a1f5101..1d46e18 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
  * 64bit interface.
  */
 
-#define reg_to_encoding(x)                                             \
-       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
-               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
                                 const struct sys_reg_desc *r)
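
The reg_to_encoding() macro deleted above packs a descriptor's Op0/Op1/CRn/CRm/Op2 tuple into a single scalar via sys_reg(); the hunk only removes the file-local copy, which presumably now lives in a shared header so other callers can use it. Below is a minimal userspace sketch of that packing. The shift values mirror arch/arm64/include/asm/sysreg.h of this era; verify them against your tree before relying on them.

```c
/* Minimal userspace sketch of the sys_reg()/reg_to_encoding() packing.
 * Shift values mirror arch/arm64/include/asm/sysreg.h; verify against
 * your tree.
 */
#include <stdint.h>
#include <stdio.h>

#define Op0_shift 19
#define Op1_shift 16
#define CRn_shift 12
#define CRm_shift  8
#define Op2_shift  5

static uint32_t sys_reg(uint32_t op0, uint32_t op1, uint32_t crn,
			uint32_t crm, uint32_t op2)
{
	return (op0 << Op0_shift) | (op1 << Op1_shift) |
	       (crn << CRn_shift) | (crm << CRm_shift) |
	       (op2 << Op2_shift);
}

int main(void)
{
	/* SCTLR_EL1 is Op0=3, Op1=0, CRn=1, CRm=0, Op2=0. */
	printf("SCTLR_EL1 -> %#x\n", sys_reg(3, 0, 1, 0, 0));
	return 0;
}
```
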
@@ -318,14 +314,14 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 /*
  * We want to avoid world-switching all the DBG registers all the
  * time:
- * 
+ *
  * - If we've touched any debug register, it is likely that we're
  *   going to touch more of them. It then makes sense to disable the
  *   traps and start doing the save/restore dance
  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
  *   then mandatory to save/restore the registers, as the guest
  *   depends on them.
- * 
+ *
  * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
  *
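
The hunk above only trims trailing whitespace, but the comment it touches describes a lazy save/restore scheme worth illustrating: trap debug accesses until the guest actually touches a register, then flag the state dirty and stop trapping. Below is an illustrative-only userspace sketch of that dirty-bit pattern; every identifier in it (vcpu_dbg, debug_dirty, and so on) is hypothetical and merely stands in for KVM's real vCPU state.

```c
/* Illustrative sketch of the lazy debug save/restore pattern; all
 * names are hypothetical, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_dbg {
	bool debug_dirty;	/* guest has written a debug register */
	bool traps_enabled;	/* debug accesses currently trap out */
};

/* First trapped access: remember the state is dirty, then stop
 * trapping so later accesses run at native speed. */
static void on_first_debug_trap(struct vcpu_dbg *v)
{
	v->debug_dirty = true;
	v->traps_enabled = false;
}

/* World switch: only pay for the register dance when required. */
static void world_switch(struct vcpu_dbg *v)
{
	if (v->debug_dirty)
		printf("save guest / restore host debug registers\n");
}

int main(void)
{
	struct vcpu_dbg v = { .traps_enabled = true };

	on_first_debug_trap(&v);
	world_switch(&v);
	return 0;
}
```
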
@@ -1063,8 +1059,6 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
        return true;
 }
 
-#define FEATURE(x)     (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
-
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                struct sys_reg_desc const *r, bool raz)
@@ -1075,40 +1069,40 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
        switch (id) {
        case SYS_ID_AA64PFR0_EL1:
                if (!vcpu_has_sve(vcpu))
-                       val &= ~FEATURE(ID_AA64PFR0_SVE);
-               val &= ~FEATURE(ID_AA64PFR0_AMU);
-               val &= ~FEATURE(ID_AA64PFR0_CSV2);
-               val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
-               val &= ~FEATURE(ID_AA64PFR0_CSV3);
-               val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
+               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
+               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
+               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
+               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
                break;
        case SYS_ID_AA64PFR1_EL1:
-               val &= ~FEATURE(ID_AA64PFR1_MTE);
+               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
                if (kvm_has_mte(vcpu->kvm)) {
                        u64 pfr, mte;
 
                        pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
                        mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
-                       val |= FIELD_PREP(FEATURE(ID_AA64PFR1_MTE), mte);
+                       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), mte);
                }
                break;
        case SYS_ID_AA64ISAR1_EL1:
                if (!vcpu_has_ptrauth(vcpu))
-                       val &= ~(FEATURE(ID_AA64ISAR1_APA) |
-                                FEATURE(ID_AA64ISAR1_API) |
-                                FEATURE(ID_AA64ISAR1_GPA) |
-                                FEATURE(ID_AA64ISAR1_GPI));
+                       val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
+                                ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
+                                ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
+                                ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
                break;
        case SYS_ID_AA64DFR0_EL1:
                /* Limit debug to ARMv8.0 */
-               val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
-               val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
+               val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
+               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
                /* Limit guests to PMUv3 for ARMv8.4 */
                val = cpuid_feature_cap_perfmon_field(val,
                                                      ID_AA64DFR0_PMUVER_SHIFT,
                                                      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
                /* Hide SPE from guests */
-               val &= ~FEATURE(ID_AA64DFR0_PMSVER);
+               val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
                break;
        case SYS_ID_DFR0_EL1:
                /* Limit guests to PMUv3 for ARMv8.4 */
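
The rename from the file-local FEATURE() macro to ARM64_FEATURE_MASK() is mechanical: judging by the deleted definition, both expand to GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT), a 4-bit mask over one ID-register field (ARM64_FEATURE_MASK() is assumed to live in a shared arm64 header). The sketch below re-derives the clear/insert pattern used throughout read_id_reg() in plain C; GENMASK_ULL is re-implemented locally and FIELD_PREP_DEMO is a hypothetical stand-in for the kernel's FIELD_PREP, so the example builds outside the kernel.

```c
/* Re-derivation of the FEATURE()/ARM64_FEATURE_MASK() arithmetic.
 * GENMASK_ULL and FIELD_PREP_DEMO are local stand-ins for the kernel
 * macros; the CSV2 shift value is copied from asm/sysreg.h.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define ID_AA64PFR0_CSV2_SHIFT	56
#define FEATURE(x)	(GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))

/* FIELD_PREP-style insertion: shift the value to the field's position
 * and keep only bits inside the mask. */
#define FIELD_PREP_DEMO(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

int main(void)
{
	uint64_t val = ~0ULL;

	val &= ~FEATURE(ID_AA64PFR0_CSV2);		/* clear field */
	val |= FIELD_PREP_DEMO(FEATURE(ID_AA64PFR0_CSV2), 1); /* set to 1 */
	printf("CSV2 field -> %#llx\n",
	       (unsigned long long)(val & FEATURE(ID_AA64PFR0_CSV2)));
	return 0;
}
```
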
@@ -2162,23 +2156,6 @@ static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
        return 0;
 }
 
-static int match_sys_reg(const void *key, const void *elt)
-{
-       const unsigned long pval = (unsigned long)key;
-       const struct sys_reg_desc *r = elt;
-
-       return pval - reg_to_encoding(r);
-}
-
-static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
-                                        const struct sys_reg_desc table[],
-                                        unsigned int num)
-{
-       unsigned long pval = reg_to_encoding(params);
-
-       return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
-}
-
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
 {
        kvm_inject_undefined(vcpu);
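
The match_sys_reg()/find_reg() pair removed further up is a standard bsearch() over a descriptor table sorted by ascending encoding, again presumably relocated to a shared header rather than dropped. A self-contained restatement of the idea follows, with a hypothetical table. One deliberate difference: the kernel comparator returned `pval - reg_to_encoding(r)`, which relies on the difference fitting in an int; the sketch uses explicit comparisons instead.

```c
/* Userspace restatement of the removed match_sys_reg()/find_reg()
 * lookup: bsearch() over a table sorted by encoding. Table contents
 * are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct reg_desc {
	uint32_t encoding;
	const char *name;
};

static int match_reg(const void *key, const void *elt)
{
	uint32_t target = (uint32_t)(uintptr_t)key;
	const struct reg_desc *r = elt;

	if (target < r->encoding)
		return -1;
	return target > r->encoding;	/* 1 if greater, 0 on match */
}

int main(void)
{
	struct reg_desc table[] = {	/* must stay sorted by encoding */
		{ 0x1000, "REG_A" },
		{ 0x2000, "REG_B" },
		{ 0x3000, "REG_C" },
	};
	struct reg_desc *r = bsearch((void *)(uintptr_t)0x2000, table,
				     sizeof(table) / sizeof(table[0]),
				     sizeof(table[0]), match_reg);

	printf("%s\n", r ? r->name : "not found");
	return 0;
}
```
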
@@ -2421,13 +2398,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
 
        trace_kvm_handle_sys_reg(esr);
 
-       params.Op0 = (esr >> 20) & 3;
-       params.Op1 = (esr >> 14) & 0x7;
-       params.CRn = (esr >> 10) & 0xf;
-       params.CRm = (esr >> 1) & 0xf;
-       params.Op2 = (esr >> 17) & 0x7;
+       params = esr_sys64_to_params(esr);
        params.regval = vcpu_get_reg(vcpu, Rt);
-       params.is_write = !(esr & 1);
 
        ret = emulate_sys_reg(vcpu, &params);
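
The open-coded ESR decode replaced above is centralised in esr_sys64_to_params(); since that helper evidently also fills in is_write, the separate `params.is_write` assignment becomes redundant, which is why the hunk drops it. The sketch below restates the same SYS64 ISS bit positions, taken directly from the deleted lines, in standalone C; struct sys64_params and decode_sys64() are hypothetical names, and only the ISS-level fields are modelled.

```c
/* Standalone restatement of the SYS64 ISS decode that
 * esr_sys64_to_params() is assumed to centralise. Shifts and masks are
 * copied from the open-coded lines deleted above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sys64_params {
	uint8_t Op0, Op1, CRn, CRm, Op2;
	bool is_write;
};

static struct sys64_params decode_sys64(uint64_t esr)
{
	return (struct sys64_params){
		.Op0 = (esr >> 20) & 0x3,
		.Op1 = (esr >> 14) & 0x7,
		.CRn = (esr >> 10) & 0xf,
		.CRm = (esr >> 1) & 0xf,
		.Op2 = (esr >> 17) & 0x7,
		.is_write = !(esr & 1),	/* bit 0 clear means write */
	};
}

int main(void)
{
	/* Hand-assembled ISS for an MRS from SCTLR_EL1
	 * (Op0=3, Op1=0, CRn=1, CRm=0, Op2=0, direction bit set = read). */
	struct sys64_params p = decode_sys64(0x300401);

	printf("Op0=%u Op1=%u CRn=%u CRm=%u Op2=%u %s\n",
	       p.Op0, p.Op1, p.CRn, p.CRm, p.Op2,
	       p.is_write ? "write" : "read");
	return 0;
}
```
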