Merge tag 'kvmarm-5.9' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm...
[linux-2.6-microblaze.git] / arch/arm64/include/asm/kvm_host.h
index f81151a..65568b2 100644
@@ -66,19 +66,34 @@ struct kvm_vmid {
        u32    vmid;
 };
 
-struct kvm_arch {
+struct kvm_s2_mmu {
        struct kvm_vmid vmid;
 
-       /* stage2 entry level table */
-       pgd_t *pgd;
-       phys_addr_t pgd_phys;
-
-       /* VTCR_EL2 value for this VM */
-       u64    vtcr;
+       /*
+        * stage2 entry level table
+        *
+        * Two kvm_s2_mmu structures in the same VM can point to the same
+        * pgd here.  This happens when running a guest using a
+        * translation regime that isn't affected by its own stage-2
+        * translation, such as a non-VHE hypervisor running at vEL2, or
+        * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
+        * canonical stage-2 page tables.
+        */
+       pgd_t           *pgd;
+       phys_addr_t     pgd_phys;
 
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;
 
+       struct kvm *kvm;
+};
+
+struct kvm_arch {
+       struct kvm_s2_mmu mmu;
+
+       /* VTCR_EL2 value for this VM */
+       u64    vtcr;
+
        /* The maximum number of vCPUs depends on the used GIC model */
        int max_vcpus;
 
@@ -159,6 +174,16 @@ enum vcpu_sysreg {
        APGAKEYLO_EL1,
        APGAKEYHI_EL1,
 
+       ELR_EL1,
+       SP_EL1,
+       SPSR_EL1,
+
+       CNTVOFF_EL2,
+       CNTV_CVAL_EL0,
+       CNTV_CTL_EL0,
+       CNTP_CVAL_EL0,
+       CNTP_CTL_EL0,
+
        /* 32bit specific registers. Keep them at the end of the range */
        DACR32_EL2,     /* Domain Access Control Register */
        IFSR32_EL2,     /* Instruction Fault Status Register */
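
Giving the EL1 exception state and the timer registers slots in vcpu_sysreg
makes them memory-backed like any other system register, reachable through
the accessors defined later in this file. A one-line illustration (vcpu
assumed valid):

        u64 cval = __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0); /* in-memory copy */
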
@@ -210,7 +235,15 @@ enum vcpu_sysreg {
 #define NR_COPRO_REGS  (NR_SYS_REGS * 2)
 
 struct kvm_cpu_context {
-       struct kvm_regs gp_regs;
+       struct user_pt_regs regs;       /* sp = sp_el0 */
+
+       u64     spsr_abt;
+       u64     spsr_und;
+       u64     spsr_irq;
+       u64     spsr_fiq;
+
+       struct user_fpsimd_state fp_regs;
+
        union {
                u64 sys_regs[NR_SYS_REGS];
                u32 copro[NR_COPRO_REGS];
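
With struct kvm_regs gone from the context, vcpu_gp_regs() (redefined below)
now yields a struct user_pt_regs directly, and the AArch32 banked SPSRs plus
the FP/SIMD state sit beside it as plain fields. A small sketch of the
resulting access pattern (vcpu assumed valid):

        struct user_pt_regs *regs = vcpu_gp_regs(vcpu);

        regs->pc += 4;                  /* e.g. skip a trapped instruction */
        u64 pstate = regs->pstate;      /* guest PSTATE, no UAPI wrapper */
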
@@ -243,6 +276,9 @@ struct kvm_vcpu_arch {
        void *sve_state;
        unsigned int sve_max_vl;
 
+       /* Stage 2 paging state used by the hardware on next switch */
+       struct kvm_s2_mmu *hw_mmu;
+
        /* HYP configuration */
        u64 hcr_el2;
        u32 mdcr_el2;
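
hw_mmu separates "the stage-2 context the hardware will use on the next
entry" from the VM's canonical tables. For a guest without nested
virtualization the two coincide; a hedged sketch of that default wiring (its
exact place in the vcpu init path is an assumption):

        /* No nested virt: every vcpu runs on the VM's canonical stage-2. */
        vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
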
@@ -327,7 +363,7 @@ struct kvm_vcpu_arch {
        struct vcpu_reset_state reset_state;
 
        /* True when deferrable sysregs are loaded on the physical CPU,
-        * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
+        * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
        bool sysregs_loaded_on_cpu;
 
        /* Guest PV state */
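
The renamed helpers in the comment reflect that deferred save/restore only
happens on VHE. sysregs_loaded_on_cpu is what lets accessors decide where a
register's current value lives; roughly (a simplified sketch, the real
vcpu_read_sys_reg handles more cases, and read_sysreg_el1() comes from
asm/kvm_hyp.h):

        u64 val;

        if (vcpu->arch.sysregs_loaded_on_cpu)
                val = read_sysreg_el1(SYS_ELR);         /* live on the CPU (VHE) */
        else
                val = __vcpu_sys_reg(vcpu, ELR_EL1);    /* memory-backed copy */
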
@@ -378,15 +414,20 @@ struct kvm_vcpu_arch {
 #define vcpu_has_ptrauth(vcpu)         false
 #endif
 
-#define vcpu_gp_regs(v)                (&(v)->arch.ctxt.gp_regs)
+#define vcpu_gp_regs(v)                (&(v)->arch.ctxt.regs)
 
 /*
- * Only use __vcpu_sys_reg if you know you want the memory backed version of a
- * register, and not the one most recently accessed by a running VCPU.  For
- * example, for userspace access or for system registers that are never context
- * switched, but only emulated.
+ * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
+ * memory backed version of a register, and not the one most recently
+ * accessed by a running VCPU.  For example, for userspace access or
+ * for system registers that are never context switched, but only
+ * emulated.
  */
-#define __vcpu_sys_reg(v,r)    ((v)->arch.ctxt.sys_regs[(r)])
+#define __ctxt_sys_reg(c,r)    (&(c)->sys_regs[(r)])
+
+#define ctxt_sys_reg(c,r)      (*__ctxt_sys_reg(c,r))
+
+#define __vcpu_sys_reg(v,r)    (ctxt_sys_reg(&(v)->arch.ctxt, (r)))
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
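
The refactoring layers the accessors: __ctxt_sys_reg() yields a pointer
(handy for save/restore loops), ctxt_sys_reg() dereferences it, and
__vcpu_sys_reg() keeps its old meaning on top of both. The usual contrast
with the prototypes above, as a sketch (vcpu assumed valid):

        /* Always the in-memory copy: userspace access, or registers that
         * are only emulated and never context switched. */
        u64 cached = __vcpu_sys_reg(vcpu, ACTLR_EL1);

        /* The current architectural value, wherever it happens to live. */
        u64 live = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
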
@@ -442,6 +483,18 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
 
+#define kvm_call_hyp_nvhe(f, ...)                                      \
+       do {                                                            \
+               DECLARE_KVM_NVHE_SYM(f);                                \
+               __kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);    \
+       } while(0)
+
+#define kvm_call_hyp_nvhe_ret(f, ...)                                  \
+       ({                                                              \
+               DECLARE_KVM_NVHE_SYM(f);                                \
+               __kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);    \
+       })
+
 /*
  * The couple of isb() below are there to guarantee the same behaviour
  * on VHE as on !VHE, where the eret to EL1 acts as a context
@@ -453,7 +506,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
                        f(__VA_ARGS__);                                 \
                        isb();                                          \
                } else {                                                \
-                       __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+                       kvm_call_hyp_nvhe(f, ##__VA_ARGS__);            \
                }                                                       \
        } while(0)
 
@@ -465,8 +518,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
                        ret = f(__VA_ARGS__);                           \
                        isb();                                          \
                } else {                                                \
-                       ret = __kvm_call_hyp(kvm_ksym_ref(f),           \
-                                            ##__VA_ARGS__);            \
+                       ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);  \
                }                                                       \
                                                                        \
                ret;                                                    \
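
Both wrappers now funnel the !VHE case through the kvm_call_hyp_nvhe*()
macros above, which resolve f to its hyp-copy symbol before the trampoline
call. A representative use, with a hyp entry point that now takes a stage-2
context (treat the call site as an example, not a quote from this series):

        struct kvm_s2_mmu *mmu = &kvm->arch.mmu;

        /* Direct call plus isb() on VHE, hyp trampoline otherwise. */
        kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
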
@@ -518,7 +570,7 @@ DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
        /* The host's MPIDR is immutable, so let's set it up at boot time */
-       cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
+       ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }
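
ctxt_sys_reg() expands to a dereference of __ctxt_sys_reg(), so as shown
above it works as an lvalue too; the old direct array indexing would have
done the same, but the accessor keeps every sys_regs[] touch behind one
macro. An equivalent spelling of the assignment above, for comparison:

        *__ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
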
 
 static inline bool kvm_arch_requires_vhe(void)
@@ -619,8 +671,8 @@ static inline int kvm_arm_have_ssbd(void)
        }
 }
 
-void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
-void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
+void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
 
 int kvm_set_ipa_limit(void);
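
The _vhe suffix makes the calling convention explicit: these helpers are only
meaningful where the EL1 sysregs can stay resident across the run loop. A
hedged sketch of the VHE-only call site (the real load path does this among
other work):

        if (has_vhe())
                kvm_vcpu_load_sysregs_vhe(vcpu);        /* defer EL1 state to put */
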