// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
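
/*
 * Per-VCPU statistics exported through the KVM binary stats interface
 * (KVM_GET_STATS_FD). The descriptor table below combines the generic
 * VCPU counters with the RISC-V specific exit counters, and the header
 * describes how names, descriptors and data are laid out in the stats
 * file descriptor.
 */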
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
#define KVM_RISCV_ISA_DISABLE_ALLOWED	(riscv_isa_extension_mask(d) | \
					 riscv_isa_extension_mask(f))

#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED	(riscv_isa_extension_mask(a) | \
						 riscv_isa_extension_mask(c) | \
						 riscv_isa_extension_mask(i) | \
						 riscv_isa_extension_mask(m))

#define KVM_RISCV_ISA_ALLOWED	(KVM_RISCV_ISA_DISABLE_ALLOWED | \
				 KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
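
/*
 * Note on the masks above: the base extensions A, C, I and M are always
 * kept enabled for a guest, while user space may opt out of D and F
 * (see kvm_riscv_vcpu_set_reg_config() below). KVM_RISCV_ISA_ALLOWED is
 * therefore the full set of extensions a VCPU can ever expose.
 */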
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because this races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call vcpu_load()/vcpu_put().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	/* Reset the guest CSRs for the hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;

	/* Mark this VCPU as never having run */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Setup ISA features available to this VCPU */
	vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * The vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with non-zero id in power-off state so that
	 * they can be brought up using the SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	/* Free unused pages pre-allocated for Stage2 page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
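
/*
 * ONE_REG interface: the helpers below back the KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG ioctls. Each register id encodes an architecture, a
 * size, a register class (CONFIG, CORE, CSR, TIMER, FP_F, FP_D) and a
 * register number within that class; the accessors validate the size
 * and number before copying the value to or from user space.
 */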
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the disable request for these extensions */
			vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
			vcpu->arch.isa &= riscv_isa_extension_base(NULL);
			vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
	} else
		reg_val = ((unsigned long *)csr)[reg_num];

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	return 0;
}
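
/*
 * Illustrative user-space usage (not part of this file): a VMM built
 * against the uapi headers could read the guest ISA config register
 * roughly as follows, assuming an RV64 host and a "vcpu_fd" obtained
 * from KVM_CREATE_VCPU:
 *
 *	unsigned long isa;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */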
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);

	return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);

	return -EINVAL;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
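
/*
 * Pending guest interrupts are tracked in vcpu->arch.irqs_pending, with
 * irqs_pending_mask recording which bits changed since the last flush.
 * kvm_riscv_vcpu_flush_interrupts() folds those changes into the shadow
 * HVIP value before entering the guest, while kvm_riscv_vcpu_sync_interrupts()
 * reads HVIP/VSIE back after running the guest so that VSSIP updates made
 * by the guest itself are not lost.
 */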
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync up HVIP.VSSIP bit changes made by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
		}
	}
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	return 0;
}
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
			    << VSIP_TO_HVIP_SHIFT) & mask;

	return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}
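
/*
 * The MP state ioctls map directly onto the power_off flag: a powered-off
 * VCPU reports KVM_MP_STATE_STOPPED and is parked via KVM_REQ_SLEEP, while
 * setting KVM_MP_STATE_RUNNABLE clears the flag so the VCPU can be
 * scheduled again.
 */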
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}
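
/*
 * kvm_arch_vcpu_load()/kvm_arch_vcpu_put() swap the guest's VS-level CSR
 * state in and out of hardware around the periods in which the VCPU is
 * loaded on a host CPU, and save/restore the host and guest FP context.
 */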
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_stage2_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}
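
/*
 * Requests handled before each guest entry: KVM_REQ_SLEEP parks the VCPU
 * until it is neither powered off nor paused, KVM_REQ_VCPU_RESET re-runs
 * kvm_riscv_reset_vcpu(), KVM_REQ_UPDATE_HGATP reprograms the stage-2
 * page-table base, and KVM_REQ_TLB_FLUSH flushes guest-physical TLB
 * entries via HFENCE.GVMA.
 */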
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal; request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_stage2_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			__kvm_riscv_hfence_gvma_all();
	}
}
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
}
/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	guest_state_exit_irqoff();
}
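
/*
 * kvm_arch_vcpu_ioctl_run() below is the arch-specific body of the KVM_RUN
 * ioctl: it first completes any MMIO or SBI exit that user space finished
 * handling, then loops entering the guest until an exit has to be forwarded
 * to user space. Illustrative caller side (not part of this file), assuming
 * a "vcpu_fd" and a kvm_run structure mapped with mmap() per the generic
 * KVM API:
 *
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_MMIO:
 *			// emulate the access; the result is picked up on
 *			// the next KVM_RUN by kvm_riscv_vcpu_mmio_return()
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */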
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU as having run at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	/* Process MMIO value returned from user-space */
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		if (ret) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			return ret;
		}
	}

	/* Process SBI value returned from user-space */
	if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		if (ret) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			return ret;
		}
	}

	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		cond_resched();

		kvm_riscv_stage2_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		preempt_disable();

		local_irq_disable();

		/*
		 * Exit if we have a signal pending so that we can deliver
		 * the signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * The VCPU interrupts might have been updated asynchronously,
		 * so update the shadow copy in hardware.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for the current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which could change these CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up interrupt state with hardware */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}