KVM: arm64: Move kern_hyp_va() usage in __load_guest_stage2() into the callers
author		Marc Zyngier <maz@kernel.org>
		Fri, 6 Aug 2021 11:31:06 +0000 (12:31 +0100)
committer	Marc Zyngier <maz@kernel.org>
		Fri, 20 Aug 2021 08:03:42 +0000 (09:03 +0100)
It is a bit awkward to use kern_hyp_va() in __load_guest_stage2(),
especially as the helper is shared between VHE and nVHE.

Instead, move the use of kern_hyp_va() into the nVHE code and pass
a pointer to the kvm->arch structure. Although this may look a bit
awkward, it allows for some further simplification.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Jade Alglave <jade.alglave@arm.com>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210806113109.2475-4-will@kernel.org
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/nvhe/tlb.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/hyp/vhe/tlb.c

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b52c5c4..05e0896 100644
@@ -280,9 +280,10 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
+                                               struct kvm_arch *arch)
 {
-       __load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr);
+       __load_stage2(mmu, arch->vtcr);
 }
 
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index f7af968..e50a490 100644
@@ -170,6 +170,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
+       struct kvm_s2_mmu *mmu;
        bool pmu_switch_needed;
        u64 exit_code;
 
@@ -213,7 +214,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_state_nvhe(guest_ctxt);
 
-       __load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
+       mmu = kern_hyp_va(vcpu->arch.hw_mmu);
+       __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
        __activate_traps(vcpu);
 
        __hyp_vgic_restore_state(vcpu);
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 38ed0f6..7622940 100644
@@ -39,7 +39,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
         * ensuring that we always have an ISB, but not two ISBs back
         * to back.
         */
-       __load_guest_stage2(mmu);
+       __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
        asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index b322992..0cb7523 100644
@@ -128,7 +128,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
         * __load_guest_stage2 configures stage 2 translation, and
         * __activate_traps clear HCR_EL2.TGE (among other things).
         */
-       __load_guest_stage2(vcpu->arch.hw_mmu);
+       __load_guest_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
        __activate_traps(vcpu);
 
        __kvm_adjust_pc(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 66f1734..5e9fb39 100644
@@ -53,7 +53,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
         * place before clearing TGE. __load_guest_stage2() already
         * has an ISB in order to deal with this.
         */
-       __load_guest_stage2(mmu);
+       __load_guest_stage2(mmu, mmu->arch);
        val = read_sysreg(hcr_el2);
        val &= ~HCR_TGE;
        write_sysreg(val, hcr_el2);
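
For readers skimming the hunks above, here is a minimal standalone sketch of
the resulting calling convention: the shared helper now takes an
already-dereferenceable struct kvm_arch pointer, nVHE callers translate
mmu->arch through kern_hyp_va() before the call, and VHE callers pass the
kernel pointer directly. The struct definitions, the no-op kern_hyp_va()
stub and the caller function names are illustrative stand-ins, not kernel
code.

	/* Stand-ins reduced to the fields this sketch touches. */
	struct kvm_arch { unsigned long vtcr; };
	struct kvm_s2_mmu { struct kvm_arch *arch; };

	/* Stubbed out here; the real macro rewrites kernel VAs for nVHE EL2. */
	#define kern_hyp_va(v)	(v)

	static void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
	{
		(void)mmu;
		(void)vtcr;	/* the real code programs VTCR_EL2/VTTBR_EL2 here */
	}

	/* The helper no longer translates the pointer itself. */
	static inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
					       struct kvm_arch *arch)
	{
		__load_stage2(mmu, arch->vtcr);
	}

	/* nVHE caller (illustrative name): resolve the hyp VA first. */
	static void nvhe_caller(struct kvm_s2_mmu *mmu)
	{
		__load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
	}

	/* VHE caller (illustrative name): the kernel pointer is usable as-is. */
	static void vhe_caller(struct kvm_s2_mmu *mmu)
	{
		__load_guest_stage2(mmu, mmu->arch);
	}

The effect of the patch is that the address-space fixup now lives with the
nVHE callers, leaving the shared helper identical on both the VHE and nVHE
paths.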