Merge remote-tracking branch 'coresight/next-ETE-TRBE' into kvmarm-master/next
[linux-2.6-microblaze.git] arch/arm64/include/asm/kvm_host.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING     KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET      KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL    KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4    KVM_ARCH_REQ(4)

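/*
 * Illustrative sketch (not part of the original header): vcpu requests
 * such as the ones above are raised and consumed with the generic
 * helpers from include/linux/kvm_host.h, roughly:
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu))
 *		;	// serviced before the next guest entry
 */
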
#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
                                     KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
        KVM_MODE_DEFAULT,
        KVM_MODE_PROTECTED,
};
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
        /* The VMID generation used for the virt. memory system */
        u64    vmid_gen;
        u32    vmid;
};

struct kvm_s2_mmu {
        struct kvm_vmid vmid;

        /*
         * stage2 entry level table
         *
         * Two kvm_s2_mmu structures in the same VM can point to the same
         * pgd here.  This happens when running a guest using a
         * translation regime that isn't affected by its own stage-2
         * translation, such as a non-VHE hypervisor running at vEL2, or
         * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
         * canonical stage-2 page tables.
         */
        phys_addr_t     pgd_phys;
        struct kvm_pgtable *pgt;

        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;

        struct kvm *kvm;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
        struct kvm_s2_mmu mmu;

        /* VTCR_EL2 value for this VM */
        u64    vtcr;

        /* The maximum number of vCPUs depends on the used GIC model */
        int max_vcpus;

        /* Interrupt controller */
        struct vgic_dist        vgic;

        /* Mandated version of PSCI */
        u32 psci_version;

        /*
         * If we encounter a data abort without valid instruction syndrome
         * information, report this to user space.  User space can (and
         * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
         * supported.
         */
        bool return_nisv_io_abort_to_user;

        /*
         * VM-wide PMU filter, implemented as a bitmap and big enough for
         * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
         */
        unsigned long *pmu_filter;
        unsigned int pmuver;

        u8 pfr0_csv2;
        u8 pfr0_csv3;
};
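
/*
 * Illustrative sketch (not part of the original header): since pmu_filter
 * is a plain bitmap, checking whether an event is permitted is assumed to
 * reduce to a test_bit() on the event number, along the lines of:
 *
 *	if (kvm->arch.pmu_filter && !test_bit(eventsel, kvm->arch.pmu_filter))
 *		return;	// event filtered out, don't create a perf event
 */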

struct kvm_vcpu_fault_info {
        u32 esr_el2;            /* Hyp Syndrome Register */
        u64 far_el2;            /* Hyp Fault Address Register */
        u64 hpfar_el2;          /* Hyp IPA Fault Address Register */
        u64 disr_el1;           /* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
        __INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
        MPIDR_EL1,      /* MultiProcessor Affinity Register */
        CSSELR_EL1,     /* Cache Size Selection Register */
        SCTLR_EL1,      /* System Control Register */
        ACTLR_EL1,      /* Auxiliary Control Register */
        CPACR_EL1,      /* Coprocessor Access Control */
        ZCR_EL1,        /* SVE Control */
        TTBR0_EL1,      /* Translation Table Base Register 0 */
        TTBR1_EL1,      /* Translation Table Base Register 1 */
        TCR_EL1,        /* Translation Control Register */
        ESR_EL1,        /* Exception Syndrome Register */
        AFSR0_EL1,      /* Auxiliary Fault Status Register 0 */
        AFSR1_EL1,      /* Auxiliary Fault Status Register 1 */
        FAR_EL1,        /* Fault Address Register */
        MAIR_EL1,       /* Memory Attribute Indirection Register */
        VBAR_EL1,       /* Vector Base Address Register */
        CONTEXTIDR_EL1, /* Context ID Register */
        TPIDR_EL0,      /* Thread ID, User R/W */
        TPIDRRO_EL0,    /* Thread ID, User R/O */
        TPIDR_EL1,      /* Thread ID, Privileged */
        AMAIR_EL1,      /* Aux Memory Attribute Indirection Register */
        CNTKCTL_EL1,    /* Timer Control Register (EL1) */
        PAR_EL1,        /* Physical Address Register */
        MDSCR_EL1,      /* Monitor Debug System Control Register */
        MDCCINT_EL1,    /* Monitor Debug Comms Channel Interrupt Enable Reg */
        DISR_EL1,       /* Deferred Interrupt Status Register */

        /* Performance Monitors Registers */
        PMCR_EL0,       /* Control Register */
        PMSELR_EL0,     /* Event Counter Selection Register */
        PMEVCNTR0_EL0,  /* Event Counter Register (0-30) */
        PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
        PMCCNTR_EL0,    /* Cycle Counter Register */
        PMEVTYPER0_EL0, /* Event Type Register (0-30) */
        PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
        PMCCFILTR_EL0,  /* Cycle Count Filter Register */
        PMCNTENSET_EL0, /* Count Enable Set Register */
        PMINTENSET_EL1, /* Interrupt Enable Set Register */
        PMOVSSET_EL0,   /* Overflow Flag Status Set Register */
        PMSWINC_EL0,    /* Software Increment Register */
        PMUSERENR_EL0,  /* User Enable Register */

        /* Pointer Authentication Registers in a strict increasing order. */
        APIAKEYLO_EL1,
        APIAKEYHI_EL1,
        APIBKEYLO_EL1,
        APIBKEYHI_EL1,
        APDAKEYLO_EL1,
        APDAKEYHI_EL1,
        APDBKEYLO_EL1,
        APDBKEYHI_EL1,
        APGAKEYLO_EL1,
        APGAKEYHI_EL1,

        ELR_EL1,
        SP_EL1,
        SPSR_EL1,

        CNTVOFF_EL2,
        CNTV_CVAL_EL0,
        CNTV_CTL_EL0,
        CNTP_CVAL_EL0,
        CNTP_CTL_EL0,

        /* 32bit specific registers. Keep them at the end of the range */
        DACR32_EL2,     /* Domain Access Control Register */
        IFSR32_EL2,     /* Instruction Fault Status Register */
        FPEXC32_EL2,    /* Floating-Point Exception Control Register */
        DBGVCR32_EL2,   /* Debug Vector Catch Register */

        NR_SYS_REGS     /* Nothing after this line! */
};
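
/*
 * Illustrative sketch (not part of the original header): the PMEVCNTRn
 * and PMEVTYPERn entries above are laid out consecutively so that event
 * counter n can be addressed arithmetically, e.g.:
 *
 *	u64 counter = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + n);	// n in [0,30]
 */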

struct kvm_cpu_context {
        struct user_pt_regs regs;       /* sp = sp_el0 */

        u64     spsr_abt;
        u64     spsr_und;
        u64     spsr_irq;
        u64     spsr_fiq;

        struct user_fpsimd_state fp_regs;

        u64 sys_regs[NR_SYS_REGS];

        struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
        u32 events_host;
        u32 events_guest;
};

struct kvm_host_data {
        struct kvm_cpu_context host_ctxt;
        struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
        /* PSCI version used by host. */
        u32 version;

        /* Function IDs used by host if version is v0.1. */
        struct psci_0_1_function_ids function_ids_0_1;

        bool psci_0_1_cpu_suspend_implemented;
        bool psci_0_1_cpu_on_implemented;
        bool psci_0_1_cpu_off_implemented;
        bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
        unsigned long   pc;
        unsigned long   r0;
        bool            be;
        bool            reset;
};

struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;
        void *sve_state;
        unsigned int sve_max_vl;

        /* Stage 2 paging state used by the hardware on next switch */
        struct kvm_s2_mmu *hw_mmu;

        /* HYP configuration */
        u64 hcr_el2;
        u32 mdcr_el2;

        /* Exception Information */
        struct kvm_vcpu_fault_info fault;

        /* State of various workarounds, see kvm_asm.h for bit assignment */
        u64 workaround_flags;

        /* Miscellaneous vcpu state flags */
        u64 flags;

        /*
         * We maintain more than a single set of debug registers to support
         * debugging the guest from the host and to maintain separate host and
         * guest state during world switches. vcpu_debug_state are the debug
         * registers of the vcpu as the guest sees them.  host_debug_state are
         * the host registers which are saved and restored during
         * world switches. external_debug_state contains the debug values we
         * want to use while debugging the guest; it is set via the
         * KVM_SET_GUEST_DEBUG ioctl.
         *
         * debug_ptr points to the set of debug registers that should be loaded
         * onto the hardware when running the guest.
         */
        struct kvm_guest_debug_arch *debug_ptr;
        struct kvm_guest_debug_arch vcpu_debug_state;
        struct kvm_guest_debug_arch external_debug_state;

        struct thread_info *host_thread_info;   /* hyp VA */
        struct user_fpsimd_state *host_fpsimd_state;    /* hyp VA */

        struct {
                /* {Break,watch}point registers */
                struct kvm_guest_debug_arch regs;
                /* Statistical profiling extension */
                u64 pmscr_el1;
                /* Self-hosted trace */
                u64 trfcr_el1;
        } host_debug_state;

        /* VGIC state */
        struct vgic_cpu vgic_cpu;
        struct arch_timer_cpu timer_cpu;
        struct kvm_pmu pmu;

        /*
         * Anything that is not used directly from assembly code goes
         * here.
         */

        /*
         * Guest registers we preserve during guest debugging.
         *
         * These shadow registers are updated by the kvm_handle_sys_reg
         * trap handler if the guest accesses or updates them while we
         * are using guest debug.
         */
        struct {
                u32     mdscr_el1;
        } guest_debug_preserved;

        /* vcpu power-off state */
        bool power_off;

        /* Don't run the guest (internal implementation need) */
        bool pause;

        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;

        /* Target CPU and feature flags */
        int target;
        DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

        /* Detect first run of a vcpu */
        bool has_run_once;

        /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
        u64 vsesr_el2;

        /* Additional reset state */
        struct vcpu_reset_state reset_state;

        /*
         * True when deferrable sysregs are loaded on the physical CPU,
         * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
         */
        bool sysregs_loaded_on_cpu;

        /* Guest PV state */
        struct {
                u64 last_steal;
                gpa_t base;
        } steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
                                      sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({                                    \
        size_t __size_ret;                                              \
        unsigned int __vcpu_vq;                                         \
                                                                        \
        if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {          \
                __size_ret = 0;                                         \
        } else {                                                        \
                __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);    \
                __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);              \
        }                                                               \
                                                                        \
        __size_ret;                                                     \
})
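
/*
 * Illustrative sketch (not part of the original header): a caller sizing
 * the per-vcpu SVE register storage (as the SVE finalization path does)
 * is assumed to look roughly like:
 *
 *	size_t sz = vcpu_sve_state_size(vcpu);
 *
 *	if (!sz)
 *		return -EINVAL;		// sve_max_vl was invalid
 *	vcpu->arch.sve_state = kzalloc(sz, GFP_KERNEL);
 */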

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY           (1 << 0)
#define KVM_ARM64_FP_ENABLED            (1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST               (1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE       (1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED      (1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE         (1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED    (1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH     (1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION     (1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK           (7 << 9) /* Target EL/MODE */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE  (1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */

/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND       (0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT      (1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT      (2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC  (0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ   (1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ   (2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR  (3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1       (0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2       (1 << 11)

/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC          (1 << 9) /* Increment PC */
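
/*
 * Illustrative sketch (not part of the original header): injecting a
 * 64-bit synchronous exception into vEL1 combines the flags above,
 * roughly:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1		|
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC	|
 *			     KVM_ARM64_PENDING_EXCEPTION);
 */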

#define vcpu_has_sve(vcpu) (system_supports_sve() &&                    \
                            ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)                                          \
        ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||                \
          cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&               \
         (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)          false
#endif

#define vcpu_gp_regs(v)         (&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)     (&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)       (*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)     (ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
        /*
         * *** VHE ONLY ***
         *
         * System registers listed in the switch are not saved on every
         * exit from the guest but are only saved on vcpu_put.
         *
         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
         * should never be listed below, because the guest cannot modify its
         * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
         * thread when emulating cross-VCPU communication.
         */
        if (!has_vhe())
                return false;

        switch (reg) {
        case CSSELR_EL1:        *val = read_sysreg_s(SYS_CSSELR_EL1);   break;
        case SCTLR_EL1:         *val = read_sysreg_s(SYS_SCTLR_EL12);   break;
        case CPACR_EL1:         *val = read_sysreg_s(SYS_CPACR_EL12);   break;
        case TTBR0_EL1:         *val = read_sysreg_s(SYS_TTBR0_EL12);   break;
        case TTBR1_EL1:         *val = read_sysreg_s(SYS_TTBR1_EL12);   break;
        case TCR_EL1:           *val = read_sysreg_s(SYS_TCR_EL12);     break;
        case ESR_EL1:           *val = read_sysreg_s(SYS_ESR_EL12);     break;
        case AFSR0_EL1:         *val = read_sysreg_s(SYS_AFSR0_EL12);   break;
        case AFSR1_EL1:         *val = read_sysreg_s(SYS_AFSR1_EL12);   break;
        case FAR_EL1:           *val = read_sysreg_s(SYS_FAR_EL12);     break;
        case MAIR_EL1:          *val = read_sysreg_s(SYS_MAIR_EL12);    break;
        case VBAR_EL1:          *val = read_sysreg_s(SYS_VBAR_EL12);    break;
        case CONTEXTIDR_EL1:    *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
        case TPIDR_EL0:         *val = read_sysreg_s(SYS_TPIDR_EL0);    break;
        case TPIDRRO_EL0:       *val = read_sysreg_s(SYS_TPIDRRO_EL0);  break;
        case TPIDR_EL1:         *val = read_sysreg_s(SYS_TPIDR_EL1);    break;
        case AMAIR_EL1:         *val = read_sysreg_s(SYS_AMAIR_EL12);   break;
        case CNTKCTL_EL1:       *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
        case ELR_EL1:           *val = read_sysreg_s(SYS_ELR_EL12);     break;
        case PAR_EL1:           *val = read_sysreg_par();               break;
        case DACR32_EL2:        *val = read_sysreg_s(SYS_DACR32_EL2);   break;
        case IFSR32_EL2:        *val = read_sysreg_s(SYS_IFSR32_EL2);   break;
        case DBGVCR32_EL2:      *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
        default:                return false;
        }

        return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
        /*
         * *** VHE ONLY ***
         *
         * System registers listed in the switch are not restored on every
         * entry to the guest but are only restored on vcpu_load.
         *
         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
         * should never be listed below, because the MPIDR should only be set
         * once, before running the VCPU, and never changed later.
         */
        if (!has_vhe())
                return false;

        switch (reg) {
        case CSSELR_EL1:        write_sysreg_s(val, SYS_CSSELR_EL1);    break;
        case SCTLR_EL1:         write_sysreg_s(val, SYS_SCTLR_EL12);    break;
        case CPACR_EL1:         write_sysreg_s(val, SYS_CPACR_EL12);    break;
        case TTBR0_EL1:         write_sysreg_s(val, SYS_TTBR0_EL12);    break;
        case TTBR1_EL1:         write_sysreg_s(val, SYS_TTBR1_EL12);    break;
        case TCR_EL1:           write_sysreg_s(val, SYS_TCR_EL12);      break;
        case ESR_EL1:           write_sysreg_s(val, SYS_ESR_EL12);      break;
        case AFSR0_EL1:         write_sysreg_s(val, SYS_AFSR0_EL12);    break;
        case AFSR1_EL1:         write_sysreg_s(val, SYS_AFSR1_EL12);    break;
        case FAR_EL1:           write_sysreg_s(val, SYS_FAR_EL12);      break;
        case MAIR_EL1:          write_sysreg_s(val, SYS_MAIR_EL12);     break;
        case VBAR_EL1:          write_sysreg_s(val, SYS_VBAR_EL12);     break;
        case CONTEXTIDR_EL1:    write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
        case TPIDR_EL0:         write_sysreg_s(val, SYS_TPIDR_EL0);     break;
        case TPIDRRO_EL0:       write_sysreg_s(val, SYS_TPIDRRO_EL0);   break;
        case TPIDR_EL1:         write_sysreg_s(val, SYS_TPIDR_EL1);     break;
        case AMAIR_EL1:         write_sysreg_s(val, SYS_AMAIR_EL12);    break;
        case CNTKCTL_EL1:       write_sysreg_s(val, SYS_CNTKCTL_EL12);  break;
        case ELR_EL1:           write_sysreg_s(val, SYS_ELR_EL12);      break;
        case PAR_EL1:           write_sysreg_s(val, SYS_PAR_EL1);       break;
        case DACR32_EL2:        write_sysreg_s(val, SYS_DACR32_EL2);    break;
        case IFSR32_EL2:        write_sysreg_s(val, SYS_IFSR32_EL2);    break;
        case DBGVCR32_EL2:      write_sysreg_s(val, SYS_DBGVCR32_EL2);  break;
        default:                return false;
        }

        return true;
}
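
/*
 * Illustrative sketch (not part of the original header): vcpu_read_sys_reg()
 * is assumed to layer the helper above on top of the memory-backed copy,
 * conceptually:
 *
 *	u64 val;
 *
 *	if (vcpu->arch.sysregs_loaded_on_cpu &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;			// live value on this CPU (VHE)
 *	return __vcpu_sys_reg(vcpu, reg);	// memory-backed copy
 */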

struct kvm_vm_stat {
        ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
        u64 halt_successful_poll;
        u64 halt_attempted_poll;
        u64 halt_poll_success_ns;
        u64 halt_poll_fail_ns;
        u64 halt_poll_invalid;
        u64 halt_wakeup;
        u64 hvc_exit_stat;
        u64 wfe_exit_stat;
        u64 wfi_exit_stat;
        u64 mmio_exit_user;
        u64 mmio_exit_kernel;
        u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define kvm_call_hyp_nvhe(f, ...)                                       \
        ({                                                              \
                struct arm_smccc_res res;                               \
                                                                        \
                arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),               \
                                  ##__VA_ARGS__, &res);                 \
                WARN_ON(res.a0 != SMCCC_RET_SUCCESS);                   \
                                                                        \
                res.a1;                                                 \
        })

/*
 * The isb() instructions below guarantee the same behaviour on VHE as
 * on !VHE, where the eret to EL1 acts as a context synchronization
 * event.
 */
#define kvm_call_hyp(f, ...)                                            \
        do {                                                            \
                if (has_vhe()) {                                        \
                        f(__VA_ARGS__);                                 \
                        isb();                                          \
                } else {                                                \
                        kvm_call_hyp_nvhe(f, ##__VA_ARGS__);            \
                }                                                       \
        } while (0)

#define kvm_call_hyp_ret(f, ...)                                        \
        ({                                                              \
                typeof(f(__VA_ARGS__)) ret;                             \
                                                                        \
                if (has_vhe()) {                                        \
                        ret = f(__VA_ARGS__);                           \
                        isb();                                          \
                } else {                                                \
                        ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);      \
                }                                                       \
                                                                        \
                ret;                                                    \
        })
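
/*
 * Illustrative sketch (not part of the original header): hypervisor calls
 * read like ordinary function calls; on VHE they are direct calls, on nVHE
 * they turn into HVCs, e.g.:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */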

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
        vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
        return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
        /* The host's MPIDR is immutable, so let's set it up at boot time */
        ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_arch_requires_vhe(void)
{
        /*
         * The Arm architecture specifies that implementation of SVE
         * requires VHE also to be implemented.  The KVM code for arm64
         * relies on this when SVE is present:
         */
        if (system_supports_sve())
                return true;

        return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
        return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
        return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
        ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_vcpu_has_pmu(vcpu)                                  \
        (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */