KVM: arm64: Add ioctl to fetch/store tags in a guest
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3d10e65..c93a719 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -94,7 +94,7 @@ struct kvm_s2_mmu {
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;
 
-       struct kvm *kvm;
+       struct kvm_arch *arch;
 };
 
 struct kvm_arch_memory_slot {
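
Swapping the back-pointer from struct kvm to the embedded struct kvm_arch lets the stage-2 MMU code be shared with the nVHE hypervisor, which only ever sees the architectural half of the VM. Where the full struct kvm is still wanted, it can be recovered with container_of(); a minimal sketch of such a helper (the series keeps one along these lines in kvm_mmu.h):

    /* Sketch: recover the enclosing VM from the s2 MMU's arch pointer.
     * Relies on struct kvm embedding kvm_arch as its "arch" member. */
    static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
    {
            return container_of(mmu->arch, struct kvm, arch);
    }
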
@@ -132,6 +132,9 @@ struct kvm_arch {
 
        u8 pfr0_csv2;
        u8 pfr0_csv3;
+
+       /* Memory Tagging Extension enabled for the guest */
+       bool mte_enabled;
 };
 
 struct kvm_vcpu_fault_info {
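
The mte_enabled flag is a per-VM opt-in that userspace must set before any vCPU is created, so the guest's view of MTE can never change while it runs. A plausible sketch of the enable path, assuming the KVM_CAP_ARM_MTE capability this series introduces:

    /* Sketch: in kvm_vm_ioctl_enable_cap() (arch/arm64/kvm/arm.c) */
    case KVM_CAP_ARM_MTE:
            mutex_lock(&kvm->lock);
            if (!system_supports_mte() || kvm->created_vcpus) {
                    r = -EINVAL;    /* no MTE hardware, or too late */
            } else {
                    r = 0;
                    kvm->arch.mte_enabled = true;
            }
            mutex_unlock(&kvm->lock);
            break;
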
@@ -206,6 +209,12 @@ enum vcpu_sysreg {
        CNTP_CVAL_EL0,
        CNTP_CTL_EL0,
 
+       /* Memory Tagging Extension registers */
+       RGSR_EL1,       /* Random Allocation Tag Seed Register */
+       GCR_EL1,        /* Tag Control Register */
+       TFSR_EL1,       /* Tag Fault Status Register (EL1) */
+       TFSRE0_EL1,     /* Tag Fault Status Register (EL0) */
+
        /* 32bit specific registers. Keep them at the end of the range */
        DACR32_EL2,     /* Domain Access Control Register */
        IFSR32_EL2,     /* Instruction Fault Status Register */
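
These four registers carry the guest's tag-fault and tag-generation state and have to be context-switched whenever the VM has MTE enabled. A sketch of the EL1 save side, assuming a ctxt_has_mte() predicate built on the kvm_has_mte() macro added below (RGSR_EL1 and GCR_EL1 are handled separately on the entry path):

    /* Sketch: in __sysreg_save_el1_state() (hyp/include/hyp/sysreg-sr.h) */
    if (ctxt_has_mte(ctxt)) {
            ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
            ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
    }
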
@@ -315,6 +324,8 @@ struct kvm_vcpu_arch {
                struct kvm_guest_debug_arch regs;
                /* Statistical profiling extension */
                u64 pmscr_el1;
+               /* Self-hosted trace */
+               u64 trfcr_el1;
        } host_debug_state;
 
        /* VGIC state */
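
trfcr_el1 stashes the host's trace filter control while a guest runs: self-hosted trace must be prohibited before entry and any in-flight trace drained, with the old value restored on exit. A minimal sketch of the save half, assuming a __debug_save_trace() helper in the nVHE debug switch code (the real path would also first check that TRBE is actually enabled):

    /* Sketch: silence host tracing before entering the guest */
    *trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
    write_sysreg_s(0, SYS_TRFCR_EL1);   /* prohibit trace generation */
    isb();
    tsb_csync();                        /* synchronize outstanding trace */
    dsb(nsh);                           /* drain the trace buffer to memory */
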
@@ -372,8 +383,10 @@ struct kvm_vcpu_arch {
 };
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
-#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
-                                     sve_ffr_offset((vcpu)->arch.sve_max_vl)))
+#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +     \
+                            sve_ffr_offset((vcpu)->arch.sve_max_vl))
+
+#define vcpu_sve_max_vq(vcpu)  sve_vq_from_vl((vcpu)->arch.sve_max_vl)
 
 #define vcpu_sve_state_size(vcpu) ({                                   \
        size_t __size_ret;                                              \
@@ -382,7 +395,7 @@ struct kvm_vcpu_arch {
        if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {          \
                __size_ret = 0;                                         \
        } else {                                                        \
-               __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);    \
+               __vcpu_vq = vcpu_sve_max_vq(vcpu);                      \
                __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);              \
        }                                                               \
                                                                        \
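
vcpu_sve_pffr() now hands back a hyp-mapped pointer (via kern_hyp_va()) so it can be fed straight to the low-level SVE save/load routines from hypervisor context, and vcpu_sve_max_vq() gives the VL-to-VQ conversion a single home. A sketch of how the pair might plug into a guest-restore path (helper names assumed from the surrounding FPSIMD/SVE series):

    /* Sketch: restoring guest SVE state from hyp */
    sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
    __sve_restore_state(vcpu_sve_pffr(vcpu),
                        &vcpu->arch.ctxt.fp_regs.fpsr);
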
@@ -400,7 +413,13 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_GUEST_HAS_PTRAUTH    (1 << 7) /* PTRAUTH exposed to guest */
 #define KVM_ARM64_PENDING_EXCEPTION    (1 << 8) /* Exception pending */
 #define KVM_ARM64_EXCEPT_MASK          (7 << 9) /* Target EL/MODE */
+#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active  */
+#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE        (1 << 13) /* Save TRBE context if active  */
 
+#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
+                                KVM_GUESTDBG_USE_SW_BP | \
+                                KVM_GUESTDBG_USE_HW | \
+                                KVM_GUESTDBG_SINGLESTEP)
 /*
  * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
  * take the following values:
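
KVM_GUESTDBG_VALID_MASK exists so the SET_GUEST_DEBUG ioctl can reject control bits KVM does not implement rather than silently ignoring them; presumably checked at the top of kvm_arch_vcpu_ioctl_set_guest_debug():

    /* Sketch: fail fast on unsupported guest-debug control flags */
    if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
            return -EINVAL;
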
@@ -582,15 +601,11 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm,
-                       unsigned long start, unsigned long end, unsigned flags);
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
+#ifndef __KVM_NVHE_HYPERVISOR__
 #define kvm_call_hyp_nvhe(f, ...)                                              \
        ({                                                              \
                struct arm_smccc_res res;                               \
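
The per-arch hva-range prototypes removed in the previous hunk go away because the MMU notifier plumbing was hoisted into generic KVM code; under KVM_ARCH_WANT_MMU_NOTIFIER, arm64 now supplies gfn-range callbacks instead, declared in include/linux/kvm_host.h:

    bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
    bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
    bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
    bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
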
@@ -630,9 +645,13 @@ void kvm_arm_resume_guest(struct kvm *kvm);
                                                                        \
                ret;                                                    \
        })
+#else /* __KVM_NVHE_HYPERVISOR__ */
+#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
+#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
+#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
+#endif /* __KVM_NVHE_HYPERVISOR__ */
 
 void force_vm_exit(const cpumask_t *mask);
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
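
Guarding the hypercall wrappers with __KVM_NVHE_HYPERVISOR__ lets code that is now compiled into both the kernel and the nVHE hyp object use one spelling: from the host, kvm_call_hyp() traps to EL2, while inside the hypervisor it collapses to a direct call. Illustrative use (this particular callee is from the vGICv3 code):

    /* Host build: SMCCC hypercall into EL2.
     * nVHE hyp build: plain call to __vgic_v3_get_gic_config(). */
    u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
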
@@ -692,19 +711,6 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
        ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }
 
-static inline bool kvm_arch_requires_vhe(void)
-{
-       /*
-        * The Arm architecture specifies that implementation of SVE
-        * requires VHE also to be implemented.  The KVM code for arm64
-        * relies on this when SVE is present:
-        */
-       if (system_supports_sve())
-               return true;
-
-       return false;
-}
-
 void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
 
 static inline void kvm_arch_hardware_unsetup(void) {}
@@ -713,6 +719,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 void kvm_arm_init_debug(void);
+void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
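
kvm_arm_vcpu_init_debug() gives each vCPU a valid mdcr_el2 at reset time, before the value is first loaded onto the hardware. A plausible minimal body, assuming a kvm_arm_setup_mdcr_el2() helper factored out of kvm_arm_setup_debug():

    /* Sketch: compute the vCPU's initial MDCR_EL2 (arch/arm64/kvm/debug.c) */
    void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
    {
            preempt_disable();
            kvm_arm_setup_mdcr_el2(vcpu);
            preempt_enable();
    }
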
@@ -723,6 +730,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
 
+long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+                               struct kvm_arm_copy_mte_tags *copy_tags);
+
 /* Guest/host FPSIMD coordination helpers */
 int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
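
This declaration is the headline change: it backs the new KVM_ARM_MTE_COPY_TAGS VM ioctl, which lets userspace (for example, during migration) read or write the allocation tags of guest memory, one tag byte per 16-byte granule. The uapi structure defined by this series:

    /* From <uapi/asm/kvm.h>: describes a tag copy in or out of the guest */
    struct kvm_arm_copy_mte_tags {
            __u64 guest_ipa;        /* start of the guest IPA range */
            __u64 length;           /* bytes of guest memory to cover */
            void __user *addr;      /* userspace tag buffer, length/16 bytes */
            __u64 flags;            /* KVM_ARM_TAGS_TO_GUEST or _FROM_GUEST */
            __u64 reserved[2];      /* must be zero */
    };
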
@@ -734,6 +744,10 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
        return (!has_vhe() && attr->exclude_host);
 }
 
+/* Flags for host debug state */
+void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
+
 #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 {
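
These hooks decide once per vcpu_load whether the host actually owns a usable SPE or TRBE unit on this CPU, caching the answer in the SAVE_SPE/SAVE_TRBE flag bits above so the world switch doesn't have to probe ID registers on every entry. A sketch of the load side (feature-field and register macro names assumed from the SPE and TRBE support series):

    /* Sketch: set the debug-state save flags once per vcpu_load (debug.c) */
    void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
    {
            u64 dfr0 = read_sysreg(id_aa64dfr0_el1);

            /* SPE implemented, and its buffer usable by the host? */
            if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
                !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
                    vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;

            /* TRBE implemented, and programmable by the host? */
            if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
                !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
                    vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
    }
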
@@ -767,9 +781,17 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_arm_vcpu_sve_finalized(vcpu) \
        ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
 
+#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
 #define kvm_vcpu_has_pmu(vcpu)                                 \
        (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
 
 int kvm_trng_call(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_KVM
+extern phys_addr_t hyp_mem_base;
+extern phys_addr_t hyp_mem_size;
+void __init kvm_hyp_reserve(void);
+#else
+static inline void kvm_hyp_reserve(void) { }
+#endif
 
 #endif /* __ARM64_KVM_HOST_H__ */
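
kvm_has_mte() ties every MTE path to both the system capability and the per-VM opt-in, giving host and hyp code a single predicate, while the hyp_mem_base/hyp_mem_size externs and kvm_hyp_reserve() back the early memblock carve-out used by protected KVM. Typical use of the predicate on the stage-2 fault path (sketch; sanitise_mte_tags() is the helper this series adds to zero tags on pages first exposed to the guest):

    /* Sketch: in user_mem_abort(), before installing a new stage-2 mapping */
    if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
            ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
            if (ret)
                    goto out_unlock;
    }
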