arm64: Work around broken GCC 4.9 handling of "S" constraint
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 7f7072f..8e5fa28 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -7,6 +7,7 @@
 #ifndef __ARM_KVM_ASM_H__
 #define __ARM_KVM_ASM_H__
 
+#include <asm/hyp_image.h>
 #include <asm/virt.h>
 
 #define ARM_EXIT_WITH_SERROR_BIT  31
 
 #define __SMCCC_WORKAROUND_1_SMC_SZ 36
 
+#define KVM_HOST_SMCCC_ID(id)                                          \
+       ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
+                          ARM_SMCCC_SMC_64,                            \
+                          ARM_SMCCC_OWNER_VENDOR_HYP,                  \
+                          (id))
+
+#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
+
+#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init                   0
+#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run                   1
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context           2
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa         3
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid             4
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid       5
+#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff          6
+#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs                        7
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2                8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr              9
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr             10
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs               11
+#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2               12
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs              13
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs           14
+
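For illustration only (not part of the patch): assuming the ARM_SMCCC_* encodings from include/linux/arm-smccc.h (fast-call bit 31, 64-bit convention bit 30, vendor-hyp owner 6 in bits 29:24, function number in bits 15:0), the host-call ID for, say, __kvm_flush_vm_context works out to 0xc6000002. A hypothetical compile-time check of that value:

	/* Hypothetical sanity check, not taken from the patch: this is the
	 * value the host would place in x0 to call __kvm_flush_vm_context. */
	static_assert(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context) == 0xc6000002);
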
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
 
-/*
- * Translate name of a symbol defined in nVHE hyp to the name seen
- * by kernel proper. All nVHE symbols are prefixed by the build system
- * to avoid clashes with the VHE variants.
- */
-#define kvm_nvhe_sym(sym)      __kvm_nvhe_##sym
-
 #define DECLARE_KVM_VHE_SYM(sym)       extern char sym[]
 #define DECLARE_KVM_NVHE_SYM(sym)      extern char kvm_nvhe_sym(sym)[]
 
 #define DECLARE_KVM_HYP_SYM(sym)               \
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)
 
+#define DECLARE_KVM_VHE_PER_CPU(type, sym)     \
+       DECLARE_PER_CPU(type, sym)
+#define DECLARE_KVM_NVHE_PER_CPU(type, sym)    \
+       DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
+
+#define DECLARE_KVM_HYP_PER_CPU(type, sym)     \
+       DECLARE_KVM_VHE_PER_CPU(type, sym);     \
+       DECLARE_KVM_NVHE_PER_CPU(type, sym)
+
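For illustration only (not part of the patch), a declaration site might look like the sketch below; kvm_host_data is used purely as an example of a hyp per-CPU variable.

	/* Illustrative only: declares both the kernel/VHE copy and the
	 * __kvm_nvhe_-prefixed nVHE copy of a hyp per-CPU variable. */
	DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
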
+/*
+ * Compute pointer to a symbol defined in nVHE percpu region.
+ * Returns NULL if percpu memory has not been allocated yet.
+ */
+#define this_cpu_ptr_nvhe_sym(sym)     per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
+#define per_cpu_ptr_nvhe_sym(sym, cpu)                                         \
+       ({                                                                      \
+               unsigned long base, off;                                        \
+               base = kvm_arm_hyp_percpu_base[cpu];                            \
+               off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
+                     (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
+               base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
+       })
+
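For illustration only (not part of the patch), kernel proper could resolve the nVHE copy of such a variable as sketched below; the NULL case matters because kvm_arm_hyp_percpu_base[] is only populated once the hyp per-CPU pages have been allocated.

	/* Hypothetical helper, not part of the patch: look up a hyp per-CPU
	 * variable for a given CPU from kernel proper. */
	static struct kvm_host_data *example_nvhe_host_data(int cpu)
	{
		/* NULL until the hyp per-CPU pages have been allocated. */
		return per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
	}
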
+#if defined(__KVM_NVHE_HYPERVISOR__)
+
+#define CHOOSE_NVHE_SYM(sym)   sym
+#define CHOOSE_HYP_SYM(sym)    CHOOSE_NVHE_SYM(sym)
+
+/* The nVHE hypervisor shouldn't even try to access VHE symbols */
+extern void *__nvhe_undefined_symbol;
+#define CHOOSE_VHE_SYM(sym)            __nvhe_undefined_symbol
+#define this_cpu_ptr_hyp_sym(sym)      (&__nvhe_undefined_symbol)
+#define per_cpu_ptr_hyp_sym(sym, cpu)  (&__nvhe_undefined_symbol)
+
+#elif defined(__KVM_VHE_HYPERVISOR__)
+
 #define CHOOSE_VHE_SYM(sym)    sym
-#define CHOOSE_NVHE_SYM(sym)   kvm_nvhe_sym(sym)
+#define CHOOSE_HYP_SYM(sym)    CHOOSE_VHE_SYM(sym)
+
+/* The VHE hypervisor shouldn't even try to access nVHE symbols */
+extern void *__vhe_undefined_symbol;
+#define CHOOSE_NVHE_SYM(sym)           __vhe_undefined_symbol
+#define this_cpu_ptr_hyp_sym(sym)      (&__vhe_undefined_symbol)
+#define per_cpu_ptr_hyp_sym(sym, cpu)  (&__vhe_undefined_symbol)
+
+#else
 
-#ifndef __KVM_NVHE_HYPERVISOR__
 /*
  * BIG FAT WARNINGS:
  *
  * - Don't let the nVHE hypervisor have access to this, as it will
  *   pick the *wrong* symbol (yes, it runs at EL2...).
  */
-#define CHOOSE_HYP_SYM(sym)    (is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+#define CHOOSE_HYP_SYM(sym)            (is_kernel_in_hyp_mode()        \
+                                          ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))
-#else
-/* The nVHE hypervisor shouldn't even try to access anything */
-extern void *__nvhe_undefined_symbol;
-#define CHOOSE_HYP_SYM(sym)    __nvhe_undefined_symbol
+
+#define this_cpu_ptr_hyp_sym(sym)      (is_kernel_in_hyp_mode()        \
+                                          ? this_cpu_ptr(&sym)         \
+                                          : this_cpu_ptr_nvhe_sym(sym))
+
+#define per_cpu_ptr_hyp_sym(sym, cpu)  (is_kernel_in_hyp_mode()        \
+                                          ? per_cpu_ptr(&sym, cpu)     \
+                                          : per_cpu_ptr_nvhe_sym(sym, cpu))
+
+#define CHOOSE_VHE_SYM(sym)    sym
+#define CHOOSE_NVHE_SYM(sym)   kvm_nvhe_sym(sym)
+
 #endif
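For illustration only (not part of the patch): in kernel proper, where neither __KVM_NVHE_HYPERVISOR__ nor __KVM_VHE_HYPERVISOR__ is defined, the hyp accessors dispatch at runtime.

	/* Hypothetical helper, not part of the patch: picks the kernel/VHE
	 * per-CPU variable when the kernel runs at EL2 (VHE), or the
	 * __kvm_nvhe_-prefixed copy otherwise. */
	static struct kvm_host_data *example_this_cpu_host_data(void)
	{
		return this_cpu_ptr_hyp_sym(kvm_host_data);
	}
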
 
 /* Translate a kernel address @ptr into its equivalent linear mapping */
@@ -95,10 +165,16 @@ struct kvm_vcpu;
 struct kvm_s2_mmu;
 
 DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
 DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init         CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_host_vector  CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
 #define __kvm_hyp_vector       CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
+extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+DECLARE_KVM_NVHE_SYM(__per_cpu_start);
+DECLARE_KVM_NVHE_SYM(__per_cpu_end);
+
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs   CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
@@ -124,6 +200,12 @@ extern u32 __kvm_get_mdcr_el2(void);
 
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
+#if defined(GCC_VERSION) && GCC_VERSION < 50000
+#define SYM_CONSTRAINT "i"
+#else
+#define SYM_CONSTRAINT "S"
+#endif
+
 /*
  * Obtain the PC-relative address of a kernel symbol
  * s: symbol
@@ -140,30 +222,10 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
                typeof(s) *addr;                                        \
                asm("adrp       %0, %1\n"                               \
                    "add        %0, %0, :lo12:%1\n"                     \
-                   : "=r" (addr) : "S" (&s));                          \
+                   : "=r" (addr) : SYM_CONSTRAINT (&s));               \
                addr;                                                   \
        })
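For illustration only (not part of the patch): the asm above takes the symbol's address as an operand (via the "S" constraint, or "i" where older GCC mishandles "S") and materialises it with adrp/add, so the result is PC-relative and valid under the EL2 mapping rather than a kernel VA pulled from a literal pool. A hypothetical use, with __kvm_hyp_vector chosen purely as an example:

	/* Illustrative only: EL2-valid pointer to the hyp vector table. */
	unsigned long vectors = (unsigned long)hyp_symbol_addr(__kvm_hyp_vector);
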
 
-/*
- * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
- * provided that sym is really a *symbol* and not a pointer obtained from
- * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
- * sparse quiet.
- */
-#define __hyp_this_cpu_ptr(sym)                                                \
-       ({                                                              \
-               void *__ptr;                                            \
-               __verify_pcpu_ptr(&sym);                                \
-               __ptr = hyp_symbol_addr(sym);                           \
-               __ptr += read_sysreg(tpidr_el2);                        \
-               (typeof(sym) __kernel __force *)__ptr;                  \
-        })
-
-#define __hyp_this_cpu_read(sym)                                       \
-       ({                                                              \
-               *__hyp_this_cpu_ptr(sym);                               \
-        })
-
 #define __KVM_EXTABLE(from, to)                                                \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
@@ -194,20 +256,8 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
 #else /* __ASSEMBLY__ */
 
-.macro hyp_adr_this_cpu reg, sym, tmp
-       adr_l   \reg, \sym
-       mrs     \tmp, tpidr_el2
-       add     \reg, \reg, \tmp
-.endm
-
-.macro hyp_ldr_this_cpu reg, sym, tmp
-       adr_l   \reg, \sym
-       mrs     \tmp, tpidr_el2
-       ldr     \reg,  [\reg, \tmp]
-.endm
-
 .macro get_host_ctxt reg, tmp
-       hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+       adr_this_cpu \reg, kvm_host_data, \tmp
        add     \reg, \reg, #HOST_DATA_CONTEXT
 .endm
 
@@ -216,6 +266,16 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
+.macro get_loaded_vcpu vcpu, ctxt
+       adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+       ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
+.macro set_loaded_vcpu vcpu, ctxt, tmp
+       adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+       str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
 /*
  * KVM extable for unexpected exceptions.
  * In the same format as _asm_extable, but output to a different section so that
@@ -231,6 +291,45 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
        .popsection
 .endm
 
+#define CPU_XREG_OFFSET(x)     (CPU_USER_PT_REGS + 8*x)
+#define CPU_LR_OFFSET          CPU_XREG_OFFSET(30)
+#define CPU_SP_EL0_OFFSET      (CPU_LR_OFFSET + 8)
+
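For illustration only (not part of the patch): these offsets assume the layout of struct user_pt_regs (from <asm/ptrace.h>), where x0..x30 occupy regs[0..30] and the next u64 member is sp, which is the slot SP_EL0 is saved into below. A hypothetical compile-time check of that assumption:

	/* Hypothetical sanity check, not part of the patch. */
	static_assert(offsetof(struct user_pt_regs, sp) ==
		      offsetof(struct user_pt_regs, regs[30]) + 8);
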
+/*
+ * We treat x18 as callee-saved as the host may use it as a platform
+ * register (e.g. for shadow call stack).
+ */
+.macro save_callee_saved_regs ctxt
+       str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
+       stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+       stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+       stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+       stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+       stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+       stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro restore_callee_saved_regs ctxt
+       // We require that \ctxt is not one of x18-x28
+       ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
+       ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+       ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+       ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+       ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+       ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+       ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro save_sp_el0 ctxt, tmp
+       mrs     \tmp,   sp_el0
+       str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
+.endm
+
+.macro restore_sp_el0 ctxt, tmp
+       ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
+       msr     sp_el0, \tmp
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */