KVM: arm64: Allocate memory mapped at hyp for host SVE state in pKVM
authorFuad Tabba <tabba@google.com>
Mon, 3 Jun 2024 12:28:47 +0000 (13:28 +0100)
committerMarc Zyngier <maz@kernel.org>
Tue, 4 Jun 2024 14:06:33 +0000 (15:06 +0100)
Protected mode needs to maintain (save/restore) the host's SVE
state itself, rather than relying on the host kernel to do that.
This avoids leaking information to the host about guests and the
type of operations they are performing.

As a first step towards that, allocate memory mapped at hyp, per
CPU, for the host SVE state. The following patch will use this
memory to save/restore the host state.

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20240603122852.3923848-6-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/kvm_pkvm.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/reset.c

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8170c04..90df7cc 100644
@@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
 extern unsigned int __ro_after_init kvm_sve_max_vl;
+extern unsigned int __ro_after_init kvm_host_sve_max_vl;
 int __init kvm_arm_init_sve(void);
 
 u32 __attribute_const__ kvm_target_cpu(void);
@@ -521,6 +522,20 @@ struct kvm_cpu_context {
        u64 *vncr_array;
 };
 
+struct cpu_sve_state {
+       __u64 zcr_el1;
+
+       /*
+        * Ordering is important since __sve_save_state/__sve_restore_state
+        * rely on it.
+        */
+       __u32 fpsr;
+       __u32 fpcr;
+
+       /* Must be SVE_VQ_BYTES (128-bit) aligned. */
+       __u8 sve_regs[];
+};
+
 /*
  * This structure is instantiated on a per-CPU basis, and contains
  * data that is:
@@ -534,7 +549,9 @@ struct kvm_cpu_context {
  */
 struct kvm_host_data {
        struct kvm_cpu_context host_ctxt;
+
        struct user_fpsimd_state *fpsimd_state; /* hyp VA */
+       struct cpu_sve_state *sve_state;        /* hyp VA */
 
        /* Ownership of the FP regs */
        enum {
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 686cce7..b05bcec 100644
@@ -143,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
 
 extern unsigned long kvm_nvhe_sym(__icache_flags);
 extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
+extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
 
 #endif /* __ARM64_KVM_HYP_H__ */
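
Aside (not part of the diff): nVHE hyp objects are compiled separately and
their symbols are prefixed, so the kernel and the hypervisor each keep their
own copy of variables like kvm_host_sve_max_vl; kvm_nvhe_sym() is how the
kernel names the hyp-side copy. A minimal sketch of the mechanism, assuming
the kvm_asm.h definition of the macro:

    #define kvm_nvhe_sym(sym) __kvm_nvhe_##sym

    /* The extern above therefore resolves to: */
    extern unsigned int __kvm_nvhe_kvm_host_sve_max_vl;

This is why kvm_arm_init_sve() in the reset.c hunk below has to copy the
value across explicitly with kvm_nvhe_sym(kvm_host_sve_max_vl) =
kvm_host_sve_max_vl.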
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index ad9cfb5..cd56acd 100644
@@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
        return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
 }
 
+static inline size_t pkvm_host_sve_state_size(void)
+{
+       if (!system_supports_sve())
+               return 0;
+
+       return size_add(sizeof(struct cpu_sve_state),
+                       SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
+}
+
 #endif /* __ARM64_KVM_PKVM_H__ */
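
For a sense of scale, here is a userspace sketch of the arithmetic behind
pkvm_host_sve_state_size() above. It assumes the uapi sve_context.h
register-file layout (32 Z registers, 16 P registers, plus FFR) and the
architectural maximum vector length of 2048 bits; the demo_* names are
illustrative stand-ins, not kernel symbols:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SVE_VQ_BYTES 16 /* one quadword: 128 bits */

    /* Mirrors struct cpu_sve_state from the kvm_host.h hunk above. */
    struct demo_cpu_sve_state {
            uint64_t zcr_el1;
            uint32_t fpsr;
            uint32_t fpcr;
            uint8_t sve_regs[];
    };

    /* SVE_SIG_REGS_SIZE(vq): 32 Z regs + 16 P regs + FFR, all scaled by vq. */
    static size_t demo_sve_regs_size(unsigned int vl_bytes)
    {
            unsigned int vq = vl_bytes / DEMO_SVE_VQ_BYTES; /* sve_vq_from_vl() */

            return 32 * (size_t)vq * DEMO_SVE_VQ_BYTES       /* Z0-Z31 */
                 + 16 * (size_t)vq * (DEMO_SVE_VQ_BYTES / 8) /* P0-P15 */
                 + (size_t)vq * (DEMO_SVE_VQ_BYTES / 8);     /* FFR */
    }

    int main(void)
    {
            unsigned int host_sve_max_vl = 256; /* 2048 bits, i.e. vq = 16 */
            size_t size = sizeof(struct demo_cpu_sve_state)
                        + demo_sve_regs_size(host_sve_max_vl);

            /* 16 + 8736 = 8752 bytes at the architectural maximum VL */
            printf("per-CPU host SVE state: %zu bytes\n", size);
            return 0;
    }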
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9996a98..1acf741 100644
@@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void)
        return size ? get_order(size) : 0;
 }
 
+static size_t pkvm_host_sve_state_order(void)
+{
+       return get_order(pkvm_host_sve_state_size());
+}
+
 /* A lookup table holding the hypervisor VA for each vector slot */
 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
 
@@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void)
 
 static void __init teardown_hyp_mode(void)
 {
+       bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
        int cpu;
 
        free_hyp_pgds();
        for_each_possible_cpu(cpu) {
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
                free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
+
+               if (free_sve) {
+                       struct cpu_sve_state *sve_state;
+
+                       sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
+                       free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
+               }
        }
 }
 
@@ -2398,6 +2411,50 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
        return 0;
 }
 
+static int init_pkvm_host_sve_state(void)
+{
+       int cpu;
+
+       if (!system_supports_sve())
+               return 0;
+
+       /* Allocate pages for host sve state in protected mode. */
+       for_each_possible_cpu(cpu) {
+               struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
+
+               if (!page)
+                       return -ENOMEM;
+
+               per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
+       }
+
+       /*
+        * Don't map the pages in hyp since these are only used in protected
+        * mode, which will (re)create its own mapping when initialized.
+        */
+
+       return 0;
+}
+
+/*
+ * Finalizes the initialization of hyp mode once everything else is
+ * initialized and the initialization process cannot fail.
+ */
+static void finalize_init_hyp_mode(void)
+{
+       int cpu;
+
+       if (!is_protected_kvm_enabled() || !system_supports_sve())
+               return;
+
+       for_each_possible_cpu(cpu) {
+               struct cpu_sve_state *sve_state;
+
+               sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
+               per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = kern_hyp_va(sve_state);
+       }
+}
+
 static void pkvm_hyp_init_ptrauth(void)
 {
        struct kvm_cpu_context *hyp_ctxt;
@@ -2566,6 +2623,10 @@ static int __init init_hyp_mode(void)
                        goto out_err;
                }
 
+               err = init_pkvm_host_sve_state();
+               if (err)
+                       goto out_err;
+
                err = kvm_hyp_init_protection(hyp_va_bits);
                if (err) {
                        kvm_err("Failed to init hyp memory protection\n");
@@ -2730,6 +2791,13 @@ static __init int kvm_arm_init(void)
        if (err)
                goto out_subs;
 
+       /*
+        * This should be called after initialization is done and failure isn't
+        * possible anymore.
+        */
+       if (!in_hyp_mode)
+               finalize_init_hyp_mode();
+
        kvm_arm_initialised = true;
 
        return 0;
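
One more note on the arm.c hunks: both the allocation in
init_pkvm_host_sve_state() and the matching free in teardown_hyp_mode() go
through pkvm_host_sve_state_order(), so the buffer is sized in power-of-two
pages. A small sketch of the rounding this implies, where demo_get_order()
is an illustrative stand-in for the kernel's get_order() and a 4 KiB page
size is assumed:

    #include <stddef.h>
    #include <stdio.h>

    /* Smallest order such that (4 KiB << order) covers size. */
    static unsigned int demo_get_order(size_t size)
    {
            unsigned int order = 0;

            while (((size_t)4096 << order) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            size_t size = 8752; /* state size at the maximum VL, see above */
            unsigned int order = demo_get_order(size);

            /* 8752 bytes -> order 2 -> 4 pages (16 KiB) actually allocated */
            printf("order %u, %zu bytes allocated\n", order, (size_t)4096 << order);
            return 0;
    }

Allocating and freeing by order keeps the two sides trivially symmetric, at
the cost of some over-allocation when the state size lands just past a
power-of-two boundary.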
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 16aa487..25e9a94 100644
@@ -18,6 +18,8 @@ unsigned long __icache_flags;
 /* Used by kvm_get_vttbr(). */
 unsigned int kvm_arm_vmid_bits;
 
+unsigned int kvm_host_sve_max_vl;
+
 /*
  * Set trap register values based on features in ID_AA64PFR0.
  */
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 859f22f..3fae424 100644
@@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size)
        return 0;
 }
 
+static int pkvm_create_host_sve_mappings(void)
+{
+       void *start, *end;
+       int ret, i;
+
+       if (!system_supports_sve())
+               return 0;
+
+       for (i = 0; i < hyp_nr_cpus; i++) {
+               struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
+               struct cpu_sve_state *sve_state = host_data->sve_state;
+
+               start = kern_hyp_va(sve_state);
+               end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
+               ret = pkvm_create_mappings(start, end, PAGE_HYP);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                                 unsigned long *per_cpu_base,
                                 u32 hyp_va_bits)
@@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                        return ret;
        }
 
+       pkvm_create_host_sve_mappings();
+
        /*
         * Map the host sections RO in the hypervisor, but transfer the
         * ownership from the host to the hypervisor itself to make sure they
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 1b7b58c..3fc8ca1 100644
@@ -32,6 +32,7 @@
 
 /* Maximum phys_shift supported for any VM on this host */
 static u32 __ro_after_init kvm_ipa_limit;
+unsigned int __ro_after_init kvm_host_sve_max_vl;
 
 /*
  * ARMv8 Reset Values
@@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void)
 {
        if (system_supports_sve()) {
                kvm_sve_max_vl = sve_max_virtualisable_vl();
+               kvm_host_sve_max_vl = sve_max_vl();
+               kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
 
                /*
                 * The get_sve_reg()/set_sve_reg() ioctl interface will need