arm64: Rename WORKAROUND_1319367 to SPECULATIVE_AT_NVHE
Author:     Steven Price <steven.price@arm.com>
AuthorDate: Mon, 16 Dec 2019 11:56:30 +0000 (11:56 +0000)
Commit:     Will Deacon <will@kernel.org>
CommitDate: Thu, 16 Jan 2020 10:44:11 +0000 (10:44 +0000)
To match SPECULATIVE_AT_VHE, let's also have a generic name for the NVHE
variant.

Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
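
For reference, only the capability's name changes here; callers keep the same
guard pattern visible in the hunks below. A minimal sketch of that pattern
(kernel context assumed, not standalone code; the helper
nvhe_at_workaround_needed() is a hypothetical name used only for illustration):

	#include <asm/cpucaps.h>	/* ARM64_WORKAROUND_SPECULATIVE_AT_NVHE */
	#include <asm/cpufeature.h>	/* cpus_have_const_cap() */

	/* Hypothetical helper: true on CPUs affected by errata 1319537/1319367 */
	static bool nvhe_at_workaround_needed(void)
	{
		return cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE);
	}

The generic name also leaves room for other Kconfig erratum entries to select
ARM64_WORKAROUND_SPECULATIVE_AT_NVHE later without referencing a
Cortex-A57/A72-specific erratum number.
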
arch/arm64/Kconfig
arch/arm64/include/asm/cpucaps.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/kvm/hyp/tlb.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b2f0df1..d102ebd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -546,9 +546,13 @@ config ARM64_ERRATUM_1286807
          invalidated has been observed by other observers. The
          workaround repeats the TLBI+DSB operation.
 
+config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
+       bool
+
 config ARM64_ERRATUM_1319367
        bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
        default y
+       select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
        help
          This option adds work arounds for ARM Cortex-A57 erratum 1319537
          and A72 erratum 1319367
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 327a38a..3d1aa1b 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -55,7 +55,7 @@
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM    45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM   46
 #define ARM64_WORKAROUND_1542419               47
-#define ARM64_WORKAROUND_1319367               48
+#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE   48
 
 #define ARM64_NCAPS                            49
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 7886ddb..0332fca 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -934,7 +934,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1319367
        {
                .desc = "ARM erratum 1319367",
-               .capability = ARM64_WORKAROUND_1319367,
+               .capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
                ERRATA_MIDR_RANGE_LIST(ca57_a72),
        },
 #endif
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index eefcaa6..0fc824b 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -119,7 +119,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
        write_sysreg(val, cptr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
                isb();
@@ -173,7 +173,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
        u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;
 
                /*
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 22b8128..7672a97 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[MPIDR_EL1],         vmpidr_el2);
        write_sysreg(ctxt->sys_regs[CSSELR_EL1],        csselr_el1);
 
-       if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
                write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
        } else  if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[PAR_EL1],           par_el1);
        write_sysreg(ctxt->sys_regs[TPIDR_EL1],         tpidr_el1);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) &&
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
            ctxt->__hyp_running_vcpu) {
                /*
                 * Must only be done for host registers, hence the context
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index c827f3e..ff4e73c 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
                                                  struct tlb_inv_context *cxt)
 {
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;
 
                /*
@@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
        write_sysreg(0, vttbr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                /* Ensure write of the host VMID */
                isb();
                /* Restore the host's TCR_EL1 */