/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <asm/csr.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>

#define KVM_MAX_VCPUS \
	((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
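
/*
 * Illustrative values (an assumption derived from the privileged
 * spec's hgatp layout, not stated in this header): the hgatp VMID
 * field is 14 bits wide on RV64 and 7 bits on RV32, so KVM_MAX_VCPUS
 * evaluates to 16384 and 128 respectively.
 */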

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
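
/*
 * A minimal usage sketch (an assumption; these helpers belong to the
 * generic KVM request API from linux/kvm_host.h, not to this header):
 *
 *	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
 *		... reset the VCPU state ...
 */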

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held,
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
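
/*
 * A minimal sketch of the locking discipline described above (an
 * assumption; vmid_lock and the update path live elsewhere in the
 * RISC-V KVM code, not in this header):
 *
 *	spin_lock(&vmid_lock);				// writer side
 *	WRITE_ONCE(vmid->vmid_version, new_version);
 *	WRITE_ONCE(vmid->vmid, new_vmid);
 *	spin_unlock(&vmid_lock);
 *
 *	ver = READ_ONCE(vmid->vmid_version);		// lockless reader
 */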

struct kvm_arch {
	/* stage2 vmid */
	struct kvm_vmid vmid;

	/* stage2 page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;
};

struct kvm_mmio_decode {
	unsigned long insn;
	int insn_len;
	int len;
	int shift;
	int return_handled;
};

struct kvm_sbi_context {
	int return_handled;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* ISA feature bits (similar to MISA) */
	unsigned long isa;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts,
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * changed in irqs_pending. Our approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself.
	 */
	unsigned long irqs_pending;
	unsigned long irqs_pending_mask;
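
	/*
	 * A rough sketch of the scheme above (an assumption; the actual
	 * helpers are declared near the end of this header):
	 *
	 *	// producer, e.g. kvm_riscv_vcpu_set_interrupt()
	 *	set_bit(irq, &vcpu->arch.irqs_pending);
	 *	smp_mb__before_atomic();
	 *	set_bit(irq, &vcpu->arch.irqs_pending_mask);
	 *
	 *	// consumer (the VCPU), e.g. kvm_riscv_vcpu_flush_interrupts()
	 *	mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
	 *	val = READ_ONCE(vcpu->arch.irqs_pending) & mask;
	 */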

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* SBI context */
	struct kvm_sbi_context sbi_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* SRCU lock index for in-kernel run loop */
	int srcu_idx;
};

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa_divby_4,
				      unsigned long vmid);
void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
void __kvm_riscv_hfence_gvma_all(void);
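
/*
 * As the gpa_divby_4 name suggests, callers pass the guest physical
 * address shifted right by two, matching the HFENCE.GVMA operand
 * encoding in the hypervisor extension. A hypothetical call site:
 *
 *	__kvm_riscv_hfence_gvma_vmid_gpa(gpa >> 2, vmid);
 */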

int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_stage2_mode_detect(void);
unsigned long kvm_riscv_stage2_mode(void);
int kvm_riscv_stage2_gpa_bits(void);

void kvm_riscv_stage2_vmid_detect(void);
unsigned long kvm_riscv_stage2_vmid_bits(void);
int kvm_riscv_stage2_vmid_init(struct kvm *kvm);
bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu);
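
/*
 * Expected call flow (an assumption; the real loop lives in the VCPU
 * run code, not in this header): before each guest entry the run loop
 * calls
 *
 *	kvm_riscv_stage2_vmid_update(vcpu);
 *
 * and, whenever a KVM_REQ_UPDATE_HGATP request is pending,
 *
 *	kvm_riscv_stage2_update_hgatp(vcpu);
 */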

void __kvm_riscv_unpriv_trap(void);

void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
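
/*
 * Illustrative use (an assumption, modeled on typical trap handling):
 * fetch the faulting guest instruction, forwarding any trap taken
 * during the unprivileged access back to the guest:
 *
 *	insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, &utrap);
 *	if (utrap.scause)
 *		kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
 */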
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
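
/*
 * For example (an assumption; IRQ_VS_TIMER comes from asm/csr.h), a
 * timer expiry handler could inject the VS-level timer interrupt with:
 *
 *	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);
 */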

int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#endif /* __RISCV_KVM_HOST_H__ */