/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

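/*
 * Usage sketch (illustrative, not part of this header): arch requests
 * are raised with the generic kvm_make_request() helper and consumed
 * on the vcpu's next guest entry, e.g.
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * KVM_REQ_SLEEP adds KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP so the
 * sender waits for the target vcpu to leave guest mode without waking
 * it from its wait queue.
 */
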
#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
};
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm *kvm;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
};

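/*
 * Sketch (illustrative; nr_events is a hypothetical local): pmu_filter
 * stays NULL until user space programs a filter, at which point it can
 * be allocated to cover the PMU's event space, e.g.
 *
 *	kvm->arch.pmu_filter = bitmap_zalloc(nr_events, GFP_KERNEL);
 */
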
struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to debug the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
				      sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})

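/*
 * Sketch (illustrative): the SVE register storage for a vcpu is sized
 * from its maximum vector length, so an allocation made when SVE is
 * finalized could look like
 *
 *	vcpu->arch.sve_state = kzalloc(vcpu_sve_state_size(vcpu),
 *				       GFP_KERNEL);
 *
 * A return value of 0 from vcpu_sve_state_size() signals an invalid
 * sve_max_vl and must be treated as an error.
 */
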
/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */

/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */

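/*
 * Sketch (illustrative): queueing a synchronous exception for the
 * guest's EL1 combines the flags above as
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1      |
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
 *			     KVM_ARM64_PENDING_EXCEPTION);
 *
 * whereas KVM_ARM64_INCREMENT_PC must never be set together with a
 * pending exception, since it reuses bit 9 of the EXCEPT field.
 */
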
#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

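/*
 * Sketch (illustrative): vcpu_read_sys_reg() prefers the value live in
 * the CPU when the vcpu's sysregs are loaded (VHE), falling back to
 * the memory-backed copy; __vcpu_sys_reg() always hits the in-memory
 * array:
 *
 *	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); // may read hardware
 *	u64 raw   = __vcpu_sys_reg(vcpu, SCTLR_EL1);    // always ctxt.sys_regs[]
 */
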
static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})

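/*
 * Sketch (illustrative): a hypercall with no return value vs. one
 * whose result the caller needs:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ich_vtr = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
 *
 * On VHE the function runs in place (followed by an isb()); on nVHE it
 * becomes an HVC dispatched via KVM_HOST_SMCCC_FUNC().
 */
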
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

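/*
 * Sketch (illustrative): stolen time stays disabled until user space
 * sets the PV time attribute, so an update path can gate on it:
 *
 *	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
 *		kvm_update_stolen_time(vcpu);
 */
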
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented.  The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */