/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("wait", wait_exits),
	VCPU_STAT("cache", cache_exits),
	VCPU_STAT("signal", signal_exits),
	VCPU_STAT("interrupt", int_exits),
	VCPU_STAT("cop_unusable", cop_unusable_exits),
	VCPU_STAT("tlbmod", tlbmod_exits),
	VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
	VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
	VCPU_STAT("addrerr_st", addrerr_st_exits),
	VCPU_STAT("addrerr_ld", addrerr_ld_exits),
	VCPU_STAT("syscall", syscall_exits),
	VCPU_STAT("resvd_inst", resvd_inst_exits),
	VCPU_STAT("break_inst", break_inst_exits),
	VCPU_STAT("trap_inst", trap_inst_exits),
	VCPU_STAT("msa_fpe", msa_fpe_exits),
	VCPU_STAT("fpe", fpe_exits),
	VCPU_STAT("msa_disabled", msa_disabled_exits),
	VCPU_STAT("flush_dcache", flush_dcache_exits),
#ifdef CONFIG_KVM_MIPS_VZ
	VCPU_STAT("vz_gpsi", vz_gpsi_exits),
	VCPU_STAT("vz_gsfc", vz_gsfc_exits),
	VCPU_STAT("vz_hc", vz_hc_exits),
	VCPU_STAT("vz_grr", vz_grr_exits),
	VCPU_STAT("vz_gva", vz_gva_exits),
	VCPU_STAT("vz_ghfc", vz_ghfc_exits),
	VCPU_STAT("vz_gpa", vz_gpa_exits),
	VCPU_STAT("vz_resvd", vz_resvd_exits),
#endif
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	{NULL}
};

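/*
 * Note: kvm_stats_debugfs_item entries like the above are typically
 * surfaced by KVM's common debugfs code as one file per counter
 * (e.g. under /sys/kernel/debug/kvm/ when debugfs is mounted), which
 * is a convenient way to inspect the guest exit mix at run time.
 */
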
bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = 1;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
	case KVM_VM_MIPS_VZ:
#else
	case KVM_VM_MIPS_TE:
#endif
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);

	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_all(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		/* Let implementation do the rest */
		if (needs_flush)
			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}

static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}

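/*
 * The hrtimer return value above comes from kvm_mips_count_timeout() in the
 * timer emulation code, which decides whether to rearm the timer for the
 * next emulated CP0 count/compare period (HRTIMER_RESTART) or let it stop
 * (HRTIMER_NORESTART).
 */
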
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;
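
	/*
	 * Rough map of the handler region assembled below, as offsets from
	 * gebase (see the kvm_mips_build_* calls that follow):
	 *
	 *   0x0000 (0x0080 on 64-bit VZ): TLB/XTLB refill vector
	 *   0x0180:                       general exception entry
	 *   0x0200 + i * VECTORSPACING:   vectored interrupt entries, i = 0..7
	 *   0x2000:                       common exit handler, then vcpu_run
	 */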

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");

	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_commpage;

	return 0;

out_free_commpage:
	kfree(vcpu->arch.kseg0_commpage);
out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->immediate_exit)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(run, vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}

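/*
 * Illustrative userspace sketch (not part of this file) of driving the
 * handler above via the KVM_INTERRUPT vCPU ioctl; vcpu_fd is an assumed,
 * already-created vCPU file descriptor and error handling is omitted:
 *
 *	struct kvm_mips_interrupt irq = {
 *		.cpu = -1,	// -1 targets the vCPU the ioctl is issued on
 *		.irq = 2,	// 2/3/4 raise an IRQ, -2/-3/-4 lower it
 *	};
 *
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 */
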
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

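/*
 * Illustrative userspace sketch (not part of this file): reading the guest
 * PC via KVM_GET_ONE_REG, which is served by kvm_mips_get_reg() above.
 * vcpu_fd is an assumed, already-created vCPU file descriptor:
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		printf("guest pc = 0x%llx\n", (unsigned long long)pc);
 */
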
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		/* Fall through to the switch so the value is actually set */
		if (copy_from_user(vs, uaddr, 16))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

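/*
 * Illustrative userspace sketch (not part of this file): opting in to the
 * guest FPU via KVM_ENABLE_CAP, which is routed to the function above
 * (flags and args must be zero, as checked there). vcpu_fd is an assumed,
 * already-created vCPU file descriptor:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_MIPS_FPU,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */
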
long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}

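/*
 * Illustrative userspace sketch (not part of this file) of the two-call
 * KVM_GET_REG_LIST pattern served above: probe with n = 0 so the ioctl
 * fails with E2BIG but writes back the real count, then retry with a
 * suitably sized buffer. vcpu_fd is an assumed vCPU file descriptor:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// expect E2BIG
 *	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
 *		;	// list->reg[] now holds list->n register IDs
 */
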
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	/* Let implementation handle TLB/GVA invalidation */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		/*
		 * Do a privilege check, if in UM most of these exit conditions
		 * end up causing an exception to be delivered to the Guest
		 * Kernel
		 */
		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
		if (er == EMULATE_PRIV_FAIL) {
			goto skip_emul;
		} else if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
			goto skip_emul;
		}
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester
		 * sees the VCPU as outside of guest mode and not needing an
		 * IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(run, vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_stop();

	return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();
		}

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();
		}

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);