KVM: arm64: Add save/restore support for FPMR
author Marc Zyngier <maz@kernel.org>
Tue, 20 Aug 2024 13:17:58 +0000 (14:17 +0100)
committer Marc Zyngier <maz@kernel.org>
Tue, 27 Aug 2024 06:59:27 +0000 (07:59 +0100)
Just like the rest of the FP/SIMD state, FPMR needs to be context
switched.

The only interesting thing here is that the pKVM case needs to be
treated a bit differently: there, the host FP state is never written
back to the vcpu thread, but is instead stored locally and eagerly
restored.
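
A rough stand-alone model of the host-side save follows (a sketch, not
the kernel code: host_data, read_fpmr(), protected_kvm and
save_host_fpmr() are simplified stand-ins for kvm_host_data,
read_sysreg_s(SYS_FPMR), is_protected_kvm_enabled() and
kvm_hyp_save_fpsimd_host()):

  /* Stand-alone model of the host-side FPMR save, not the patch itself */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct host_data {
          union {
                  uint64_t *fpmr_ptr;  /* !pKVM: points at the host thread's FPMR */
                  uint64_t fpmr;       /* pKVM: private copy, eagerly restored */
          };
  };

  static uint64_t read_fpmr(void) { return 0x2a; }  /* read_sysreg_s(SYS_FPMR) */
  static bool protected_kvm;                        /* is_protected_kvm_enabled() */

  static void save_host_fpmr(struct host_data *hd)
  {
          uint64_t val = read_fpmr();

          if (protected_kvm)
                  hd->fpmr = val;        /* keep a local copy for the eager restore */
          else
                  *hd->fpmr_ptr = val;   /* write into the host thread's storage */
  }

  int main(void)
  {
          uint64_t thread_fpmr = 0;
          struct host_data hd = { .fpmr_ptr = &thread_fpmr };

          save_host_fpmr(&hd);
          printf("host FPMR: %#llx\n", (unsigned long long)thread_fpmr);
          return 0;
  }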

Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20240820131802.3547589-5-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c

arch/arm64/include/asm/kvm_host.h
index 021f7a1..a6b684c 100644
@@ -599,6 +599,16 @@ struct kvm_host_data {
                struct cpu_sve_state *sve_state;
        };
 
+       union {
+               /* HYP VA pointer to the host storage for FPMR */
+               u64     *fpmr_ptr;
+               /*
+                * Used by pKVM only, as it needs to provide storage
+                * for the host
+                */
+               u64     fpmr;
+       };
+
        /* Ownership of the FP regs */
        enum {
                FP_STATE_FREE,
arch/arm64/kvm/fpsimd.c
index 4cb8ad5..ea5484c 100644
@@ -63,6 +63,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
         */
        *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
        *host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
+       *host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
 
        vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
        if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
arch/arm64/kvm/hyp/include/hyp/switch.h
index f59ccfe..84a135b 100644
@@ -404,6 +404,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
        else
                __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
 
+       if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
+               write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
+
        /* Skip restoring fpexc32 for AArch64 guests */
        if (!(read_sysreg(hcr_el2) & HCR_RW))
                write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f43d845..87692b5 100644
@@ -62,6 +62,8 @@ static void fpsimd_sve_flush(void)
 
 static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
 {
+       bool has_fpmr;
+
        if (!guest_owns_fp_regs())
                return;
 
@@ -73,11 +75,18 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
        else
                __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
 
+       has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
+       if (has_fpmr)
+               __vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
+
        if (system_supports_sve())
                __hyp_sve_restore_host();
        else
                __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
 
+       if (has_fpmr)
+               write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
+
        *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
 }
 
arch/arm64/kvm/hyp/nvhe/switch.c
index 6af179c..c0832ca 100644
@@ -198,6 +198,15 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
        } else {
                __fpsimd_save_state(*host_data_ptr(fpsimd_state));
        }
+
+       if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
+               u64 val = read_sysreg_s(SYS_FPMR);
+
+               if (unlikely(is_protected_kvm_enabled()))
+                       *host_data_ptr(fpmr) = val;
+               else
+                       **host_data_ptr(fpmr_ptr) = val;
+       }
 }
 
 static const exit_handler_fn hyp_exit_handlers[] = {
arch/arm64/kvm/hyp/vhe/switch.c
index 77010b7..80581b1 100644
@@ -312,6 +312,9 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
 static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 {
        __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+
+       if (kvm_has_fpmr(vcpu->kvm))
+               **host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
 }
 
 static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)