KVM: arm64: Move 'struct kvm_arch_memory_slot' out of uapi/
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bb5e5b8..32db719 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -11,6 +11,7 @@
 #ifndef __ARM64_KVM_HOST_H__
 #define __ARM64_KVM_HOST_H__
 
+#include <linux/arm-smccc.h>
 #include <linux/bitmap.h>
 #include <linux/types.h>
 #include <linux/jump_label.h>
@@ -79,8 +80,8 @@ struct kvm_s2_mmu {
         * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
         * canonical stage-2 page tables.
         */
-       pgd_t           *pgd;
        phys_addr_t     pgd_phys;
+       struct kvm_pgtable *pgt;
 
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;
@@ -88,6 +89,9 @@ struct kvm_s2_mmu {
        struct kvm *kvm;
 };
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch {
        struct kvm_s2_mmu mmu;
 
@@ -110,6 +114,13 @@ struct kvm_arch {
         * supported.
         */
        bool return_nisv_io_abort_to_user;
+
+       /*
+        * VM-wide PMU filter, implemented as a bitmap and big enough for
+        * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
+        */
+       unsigned long *pmu_filter;
+       unsigned int pmuver;
 };
 
 struct kvm_vcpu_fault_info {
@@ -231,6 +242,7 @@ enum vcpu_sysreg {
 #define cp14_DBGWCR0   (DBGWCR0_EL1 * 2)
 #define cp14_DBGWVR0   (DBGWVR0_EL1 * 2)
 #define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+#define cp14_DBGVCR    (DBGVCR32_EL2 * 2)
 
 #define NR_COPRO_REGS  (NR_SYS_REGS * 2)
 
@@ -262,8 +274,6 @@ struct kvm_host_data {
        struct kvm_pmu_events pmu_events;
 };
 
-typedef struct kvm_host_data kvm_host_data_t;
-
 struct vcpu_reset_state {
        unsigned long   pc;
        unsigned long   r0;
@@ -480,18 +490,15 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
-u64 __kvm_call_hyp(void *hypfn, ...);
-
-#define kvm_call_hyp_nvhe(f, ...)                                      \
-       do {                                                            \
-               DECLARE_KVM_NVHE_SYM(f);                                \
-               __kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);    \
-       } while(0)
-
-#define kvm_call_hyp_nvhe_ret(f, ...)                                  \
+#define kvm_call_hyp_nvhe(f, ...)                                              \
        ({                                                              \
-               DECLARE_KVM_NVHE_SYM(f);                                \
-               __kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);    \
+               struct arm_smccc_res res;                               \
+                                                                       \
+               arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),               \
+                                 ##__VA_ARGS__, &res);                 \
+               WARN_ON(res.a0 != SMCCC_RET_SUCCESS);                   \
+                                                                       \
+               res.a1;                                                 \
        })
 
 /*
@@ -517,7 +524,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
                        ret = f(__VA_ARGS__);                           \
                        isb();                                          \
                } else {                                                \
-                       ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);  \
+                       ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);      \
                }                                                       \
                                                                        \
                ret;                                                    \
@@ -565,7 +572,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
+DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
 
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
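
Illustrative note (not part of the patch): the most visible interface change above is that kvm_call_hyp_nvhe() now reaches the nVHE hypervisor through an SMCCC 1.1 HVC instead of the removed __kvm_call_hyp() trampoline. The sketch below shows roughly what a call such as kvm_call_hyp_nvhe(__kvm_flush_vm_context) boils down to; KVM_HOST_SMCCC_FUNC() and the __kvm_flush_vm_context hyp function come from asm/kvm_asm.h and are assumed here rather than shown in this diff.

#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <asm/kvm_asm.h>

/* Sketch of the expansion of kvm_call_hyp_nvhe(__kvm_flush_vm_context). */
static unsigned long example_flush_vm_context(void)
{
	struct arm_smccc_res res;

	/* HVC into the nVHE hypervisor with the per-function host call ID. */
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);

	/* The hyp side signals success or failure in a0 ... */
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);

	/* ... and passes any return value back in a1, which the macro's
	 * statement expression evaluates to. */
	return res.a1;
}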