/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "loongson_regs.h"

#include "trace.h"
/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;
static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}
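/*
 * Example: kvm_vz_write_gc0_ebase(0x9fc00000) first writes
 * 0x9fc00000 | MIPS_EBASE_WG so the WG-gated EBase fields take the new
 * value, then writes plain 0x9fc00000 so that WG ends up clear unless
 * the caller explicitly asked for it.
 */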
/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */
static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF6_LOONGSON_INTIMER | MIPS_CONF6_LOONGSON_EXTIMER;
}
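/*
 * These masks are consumed with a read-modify-write pattern; e.g. the
 * Config5 GSFC emulation below applies a guest write as:
 *
 *	change = val ^ old_val;
 *	val = old_val ^ (change & kvm_vz_config5_guest_wrmask(vcpu));
 *
 * so only bits present in the write mask can actually toggle.
 */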
/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	MRP
 */
static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config6_guest_wrmask(vcpu) |
		MIPS_CONF6_LOONGSON_SFBEN | MIPS_CONF6_LOONGSON_FTLBDIS;
}
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}
static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}
static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}
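/*
 * In kvm_vz_irq_clear_cb() above, irq is a Cause.IP* mask, so irq << 14
 * lines it up with the matching GuestCtl2 Hardware Clear bit: when
 * hardware clear is in use for that interrupt the hardware retires it
 * itself, otherwise the GuestCtl2.VIP bit (or Cause.IP) is cleared by
 * hand.
 */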
/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
 *		instead of software emulation of guest timer.
 *		false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
	if (kvm_mips_count_disabled(vcpu))
		return false;

	/* Chosen frequency must match real frequency */
	if (mips_hpt_frequency != vcpu->arch.count_hz)
		return false;

	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
	if (current_cpu_data.gtoffset_mask != 0xffffffff)
		return false;

	return true;
}
/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}
/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
				   u32 compare, u32 cause)
{
	u32 start_count, after_count;
	ktime_t freeze_time;
	unsigned long flags;

	/*
	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	local_irq_save(flags);
	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
	write_c0_gtoffset(start_count - read_c0_count());
	local_irq_restore(flags);

	/* restore guest CP0_Cause, as TI may already be set */
	back_to_back_c0_hazard();
	write_gc0_cause(cause);

	/*
	 * The above sequence isn't atomic and would result in lost timer
	 * interrupts if we're not careful. Detect if a timer interrupt is due
	 * and assert it.
	 */
	back_to_back_c0_hazard();
	after_count = read_gc0_count();
	if (after_count - start_count > compare - start_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
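/*
 * The lost-interrupt check in _kvm_vz_restore_htimer() above works in
 * modular arithmetic: "after_count - start_count > compare - start_count - 1"
 * asks whether Compare lies in (start_count, after_count]. E.g. with
 * start_count = 0xfffffffe, compare = 0xffffffff and
 * after_count = 0x00000001, it evaluates as 3 > 0, so the timer
 * interrupt that fired during the wrap is queued rather than lost.
 */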
/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}
/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since hard timer won't remain active over preemption, preemption should be
 * disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0;

	gctl0 = read_c0_guestctl0();
	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
		/* enable guest access to hard timer */
		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
				       read_gc0_cause());
	}
}
/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
				u32 *out_compare, u32 *out_cause)
{
	u32 cause, compare, before_count, end_count;
	ktime_t before_time;

	compare = read_gc0_compare();
	*out_compare = compare;

	before_time = ktime_get();

	/*
	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
	 * at which no pending timer interrupt is missing.
	 */
	before_count = read_gc0_count();
	back_to_back_c0_hazard();
	cause = read_gc0_cause();
	*out_cause = cause;

	/*
	 * Record a final CP0_Count which we will transfer to the soft-timer.
	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
	 * interrupts from just after the final CP0_Count point.
	 */
	back_to_back_c0_hazard();
	end_count = read_gc0_count();

	/*
	 * The above sequence isn't atomic, so we could miss a timer interrupt
	 * between reading CP0_Cause and end_count. Detect and record any timer
	 * interrupt due between before_count and end_count.
	 */
	if (end_count - before_count > compare - before_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

	/*
	 * Restore soft-timer, ignoring a small amount of negative drift due to
	 * delay between freeze_hrtimer and setting CP0_GTOffset.
	 */
	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}
/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 gctl0, compare, cause;

	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of hard timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);
	} else {
		compare = read_gc0_compare();
		cause = read_gc0_cause();
	}

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}
/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0, compare, cause;

	preempt_disable();
	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* switch to soft timer */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);

		/* leave soft timer in usable state */
		_kvm_vz_restore_stimer(vcpu, compare, cause);
	}
	preempt_enable();
}
/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op: case lwre_op: case cachee_op: case sbe_op:
	case she_op: case sce_op: case swe_op: case swle_op:
	case swre_op: case prefe_op: case lbue_op: case lhue_op:
	case lbe_op: case lhe_op: case lle_op: case lwe_op:
		return true;
	default:
		return false;
	}
}
/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *      AM      UM  SM  KM  31..24 23..16
	 * UK    0 000          Unm   0      0
	 * MK    1 001          TLB   1
	 * MSK   2 010      TLB TLB   1
	 * MUSK  3 011  TLB TLB TLB   1
	 * MUSUK 4 100  TLB TLB Unm   0      1
	 * USK   5 101      Unm Unm   0      0
	 * -     6 110                0      0
	 * UUSK  7 111  Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}
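/*
 * Worked example of the magic lookup above for MUSUK (am = 4):
 *
 *	0x70080000 << 4 = 0x00800000	sign clear -> not always mapped
 *	0x00800000 << 8 = 0x80000000	sign set   -> mapped outside kernel
 *					mode (or via EVA accessors)
 *
 * The 0x70 byte flags MK/MSK/MUSK (am 1..3) on the first test and the
 * 0x08 byte flags MUSUK (am 4) on the second.
 */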
/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;

			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out that
				 * segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
				segctl = read_gc0_segctl1();
				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
						     0))
					goto tlb_mapped;
			}
		}

		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

tlb_mapped:
	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}
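/*
 * For example, a guest KSeg0 reference at GVA 0x80001000 (with no
 * segmentation control) takes the legacy unmapped path above and yields
 * GPA 0x80001000 & 0x1fffffff = 0x00001000 without consulting the guest
 * TLB.
 */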
/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}
static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}
static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
	/* Mask off unused bits */
	unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;

	if (read_gc0_pagegrain() & PG_ELPA)
		mask |= 0x00ffffff00000000ull;
	if (cpu_guest_has_mvh)
		mask |= MIPS_MAAR_VH;

	/* Set or clear VH */
	if (op == mtc_op) {
		/* clear VH if upper bits were written */
		val &= ~MIPS_MAAR_VH;
	} else if (op == dmtc_op) {
		/* set VH to match VL */
		val &= ~MIPS_MAAR_VH;
		if (val & MIPS_MAAR_VL)
			val |= MIPS_MAAR_VH;
	}

	return val & mask;
}

static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	val &= MIPS_MAARI_INDEX;
	if (val == MIPS_MAARI_INDEX)
		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
	else if (val < ARRAY_SIZE(vcpu->arch.maar))
		kvm_write_sw_gc0_maari(cop0, val);
}
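/*
 * This mirrors the architectural MAARI index behaviour: writing a value
 * with all MIPS_MAARI_INDEX bits set selects the highest implemented
 * MAAR pair, an in-range value selects that pair directly, and anything
 * else is ignored.
 */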
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				if (cpu_guest_has_rw_llb)
					val = read_gc0_lladdr() &
						MIPS_LLADDR_LLB;
				else
					val = 0;
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
						ARRAY_SIZE(vcpu->arch.maar));
				val = vcpu->arch.maar[
					kvm_read_sw_gc0_maari(cop0)];
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 6 ||	/* Config6 */
				     sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_LLADDR &&
				    (sel == 2) &&	/* MAARI */
				    cpu_guest_has_maar &&
				    !cpu_guest_has_dyn_maar) ||
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
#ifdef CONFIG_CPU_LOONGSON64
			} else if (rd == MIPS_CP0_DIAG &&
				   (sel == 0)) {	/* Diag */
				val = cop0->reg[rd][sel];
#endif
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_vz_lose_htimer(vcpu);
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				/*
				 * P5600 generates GPSI on guest MTC0 LLAddr.
				 * Only allow the guest to clear LLB.
				 */
				if (cpu_guest_has_rw_llb &&
				    !(val & MIPS_LLADDR_LLB))
					write_gc0_lladdr(0);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				val = mips_process_maar(inst.c0r_format.rs,
							val);

				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
						ARRAY_SIZE(vcpu->arch.maar));
				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
									val;
			} else if (rd == MIPS_CP0_LLADDR &&
				   (sel == 2) &&	/* MAARI */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				kvm_write_maari(vcpu, val);
			} else if (rd == MIPS_CP0_CONFIG &&
				   (sel == 6)) {
				cop0->reg[rd][sel] = (int)val;
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
			} else if (rd == MIPS_CP0_DIAG &&
				   (sel == 0)) {	/* Diag */
				unsigned long flags;

				local_irq_save(flags);
				if (val & LOONGSON_DIAG_BTB) {
					/* Flush BTB */
					set_c0_diag(LOONGSON_DIAG_BTB);
				}
				if (val & LOONGSON_DIAG_ITLB) {
					/* Flush ITLB */
					set_c0_diag(LOONGSON_DIAG_ITLB);
				}
				if (val & LOONGSON_DIAG_DTLB) {
					/* Flush DTLB */
					set_c0_diag(LOONGSON_DIAG_DTLB);
				}
				if (val & LOONGSON_DIAG_VTLB) {
					/* Flush VTLB */
					kvm_loongson_clear_guest_vtlb();
				}
				if (val & LOONGSON_DIAG_FTLB) {
					/* Flush FTLB */
					kvm_loongson_clear_guest_ftlb();
				}
				local_irq_restore(flags);
#endif
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	case Hit_Invalidate_I:
	case Hit_Invalidate_D:
	case Hit_Writeback_Inv_D:
		if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
			/* We can just flush entire icache */
			local_flush_icache_range(0, 0);
			return EMULATE_DONE;
		}

		/* So far, other platforms support guest hit cache ops */
		break;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}
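/*
 * op_inst above decodes as { Op[4:2], Cache[1:0] }: e.g.
 * Hit_Writeback_Inv_D (0x15) splits into cache = 0x15 & CacheOp_Cache
 * = Cache_D and op = 0x15 & CacheOp_Op = Hit_Writeback_Inv.
 */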
#ifdef CONFIG_CPU_LOONGSON64
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	unsigned int rs, rd;
	unsigned int hostcfg;
	unsigned long curr_pc;
	enum emulation_result er = EMULATE_DONE;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rs = inst.loongson3_lscsr_format.rs;
	rd = inst.loongson3_lscsr_format.rd;
	switch (inst.loongson3_lscsr_format.fr) {
	case 0x8:  /* Read CPUCFG */
		++vcpu->stat.vz_cpucfg_exits;
		hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);

		switch (vcpu->arch.gprs[rs]) {
		case LOONGSON_CFG0:
			vcpu->arch.gprs[rd] = 0x14c000;
			break;
		case LOONGSON_CFG1:
			hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
				    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
				    LOONGSON_CFG1_SFBP);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG2:
			hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
				    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG3:
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		default:
			/* Don't export any other advanced features to guest */
			vcpu->arch.gprs[rd] = 0;
			break;
		}
		break;

	default:
		kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
			inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs],
			curr_pc);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
			curr_pc, __func__, inst.word,
			inst.loongson3_lscsr_format.fr);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}
#endif
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
	case lwc2_op:
		er = kvm_vz_gpsi_lwc2(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC) {
					kvm_vz_lose_htimer(vcpu);
					kvm_mips_count_disable_cause(vcpu);
				} else {
					kvm_mips_count_enable_cause(vcpu);
				}
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}
static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	/*
	 * Presumably this is due to MC (guest mode change), so let's trace some
	 * relevant info.
	 */
	trace_kvm_guest_mode_change(vcpu);

	return EMULATE_DONE;
}
static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}
static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}
static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
 * by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}
static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_contextconfig[] = {
	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
	KVM_REG_MIPS_CP0_SEGCTL0,
	KVM_REG_MIPS_CP0_SEGCTL1,
	KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_htw[] = {
	KVM_REG_MIPS_CP0_PWBASE,
	KVM_REG_MIPS_CP0_PWFIELD,
	KVM_REG_MIPS_CP0_PWSIZE,
	KVM_REG_MIPS_CP0_PWCTL,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};
static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	if (cpu_guest_has_contextconfig)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	if (cpu_guest_has_segments)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	if (cpu_guest_has_htw || cpu_guest_has_ldpte)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}
static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_contextconfig) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
				 sizeof(kvm_vz_get_one_regs_contextconfig)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	}
	if (cpu_guest_has_segments) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
				 sizeof(kvm_vz_get_one_regs_segments)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	}
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
				 sizeof(kvm_vz_get_one_regs_htw)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	}
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
			index = KVM_REG_MIPS_CP0_MAAR(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}

		index = KVM_REG_MIPS_CP0_MAARI;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}
static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}
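/*
 * Example on a 32-bit host: hardware EntryLo keeps RI/XI in bits 31:30,
 * while the 64-bit KVM register API keeps them in bits 63:62, so
 * entrylo_kvm_to_user(0x80000007) yields 0x8000000000000007 and
 * entrylo_user_to_kvm() undoes the transformation.
 */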
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_contextconfig();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_xcontextconfig();
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl0();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl1();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl2();
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwbase();
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwfield();
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwsize();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwctl();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		*v = read_gc0_badinstr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		*v = read_gc0_badinstrp();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a read-only guest.PRid */
			*v = read_gc0_prid();
			break;
		default:
			*v = (long)kvm_read_c0_guest_prid(cop0);
			break;
		}
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
	case KVM_REG_MIPS_CP0_CONFIG6:
		*v = kvm_read_sw_gc0_config6(cop0);
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		*v = vcpu->arch.maar[idx];
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_contextconfig(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_xcontextconfig(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl0(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl1(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl2(v);
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwbase(v);
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwfield(v);
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwsize(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwctl(v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a guest.PRid, but it's read-only */
			break;
		default:
			kvm_write_c0_guest_prid(cop0, v);
			break;
		}
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG6:
		cur = kvm_read_sw_gc0_config6(cop0);
		change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_sw_gc0_config6(cop0, (int)v);
		}
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		kvm_write_maari(vcpu, v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
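/*
 * kvm_vz_get_one_reg()/kvm_vz_set_one_reg() back the KVM_GET_ONE_REG
 * and KVM_SET_ONE_REG vcpu ioctls, which userspace uses to save and
 * restore the guest CP0 context, e.g. around migration.
 */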
#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;	/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}
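/*
 * GuestIDs use the same version + ID scheme as MIPS ASIDs: the low
 * GUESTID_MASK bits are what actually get written to GuestCtl1.ID, the
 * upper bits version the allocation, and a wrap of the ID space (the
 * increment above carrying out of GUESTID_MASK) means every guest TLB
 * entry on this CPU may be stale, hence the flushes.
 */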
/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!kvm_request_pending(vcpu))
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}
static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}
static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
			get_new_mmu_context(gpa_mm);
		else
			check_mmu_context(gpa_mm);
	}
}
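/*
 * Summary (editor's gloss of the two paths above): with GuestID support the
 * guest TLB can hold entries for several guests at once, so a reload only
 * needs a (possibly new) GuestID; without it, the single guest TLB context
 * must be flushed on any VCPU or CPU change, and per-CPU GPA ASIDs handle
 * the root-side aliasing instead.
 */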
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Set MC bit if we want to trace guest mode changes */
	if (kvm_trace_guest_mode_change)
		set_c0_guestctl0(MIPS_GCTL0_MC);
	else
		clear_c0_guestctl0(MIPS_GCTL0_MC);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next VCPU from succeeding by matching a LL on
	 * the previous VCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		write_gc0_lladdr(0);

	return 0;
}
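/*
 * Illustrative guest sequence (editor's example) showing why LLbit is
 * cleared: VCPU A executes "ll t0, 0(a0)" and is then descheduled; if the
 * LL/SC state survived the switch, VCPU B's subsequent "sc t1, 0(a0)" could
 * succeed against A's link address. Writing LLAddr forces the SC to fail
 * and the guest's atomic loop to retry, which is always safe.
 */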
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}
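/*
 * Editor's note: Root.GuestCtl2 carries injected virtual interrupt state
 * that must travel with the VCPU, and the guest's own GuestCtl2 slot in
 * cop0->reg[][] is otherwise unused under VZ, so it doubles as the save
 * area here; kvm_vz_vcpu_load() restores from the same slot.
 */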
/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
	 * not dropped)
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}

	return ret + 1;
}
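/*
 * Worked example (illustrative): for size = 128, size - 1 = 127. The low
 * MIPS_CONF1_TLBS_SIZE (6) bits, 0x3f, land in Config1.MMUSize-1 and the
 * remaining bit (127 >> 6 = 1) in Config4.VTLBSizeExt. Reading the fields
 * back and recombining them, as above, reports what the core actually
 * accepted, which may be less than requested on a small shared TLB.
 */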
static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;
	u64 guest_cvmctl, cvmvmconfig;

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Set up guest timer/perfcount IRQ lines */
		guest_cvmctl = read_gc0_cvmctl();
		guest_cvmctl &= ~CVMCTL_IPTI;
		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
		guest_cvmctl &= ~CVMCTL_IPPCI;
		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
		write_gc0_cvmctl(guest_cvmctl);

		cvmvmconfig = read_c0_cvmvmconfig();
		/* No I/O hole translation. */
		cvmvmconfig |= CVMVMCONF_DGHT;
		/* Halve the root MMU size */
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		guest_mmu_size = mmu_size / 2;
		mmu_size -= guest_mmu_size;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = guest_mmu_size;

		/* Flush moved entries in new (guest) context */
		kvm_vz_local_flush_guesttlb_all();
		break;
	default:
		/*
		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
		 * overlap of root wired and guest entries, the guest TLB may
		 * need resizing.
		 */
		mmu_size = current_cpu_data.tlbsizevtlb;
		ftlb_size = current_cpu_data.tlbsize - mmu_size;

		/* Try switching to maximum guest VTLB size for flush */
		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
		kvm_vz_local_flush_guesttlb_all();

		/*
		 * Reduce to make space for root wired entries and at least 2
		 * root non-wired entries. This does assume that long-term wired
		 * entries won't be added later.
		 */
		guest_mmu_size = mmu_size - num_wired_entries() - 2;
		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

		/*
		 * Write the VTLB size, but if another CPU has already written,
		 * check it matches or we won't provide a consistent view to the
		 * guest. If this ever happens it suggests an asymmetric number
		 * of wired entries.
		 */
		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
			 "Available guest VTLB size mismatch"))
			return -EINVAL;
		break;
	}

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * CP0=1:	Guest coprocessor 0 context.
	 * AT=Guest:	Guest MMU.
	 * CG=1:	Hit (virtual address) CACHE operations (optional).
	 * CF=1:	Guest Config registers.
	 * CGI=1:	Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext) {
		if (current_cpu_type() != CPU_LOONGSON64)
			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
		else
			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

#ifdef CONFIG_CPU_LOONGSON64
	/* Control guest CCA attribute */
	if (cpu_has_csr())
		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
#endif

	return 0;
}
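/*
 * Worked example (illustrative): on an Octeon III core reporting
 * MMUSizeM1 = 255 (256 entries), the split above leaves the root with 128
 * entries and hands 128 to the guest. On shared-TLB cores the default path
 * instead shrinks the guest VTLB, e.g. a 64-entry VTLB with 6 wired root
 * entries yields a guest VTLB of at most 64 - 6 - 2 = 56 entries.
 */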
static void kvm_vz_hardware_disable(void)
{
	u64 cvmvmconfig;
	unsigned int mmu_size;

	/* Flush any remaining guest TLB entries */
	kvm_vz_local_flush_guesttlb_all();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/*
		 * Allocate whole TLB for root. Existing guest TLB entries will
		 * change ownership to the root TLB. We should be safe though as
		 * they've already been flushed above while in guest TLB.
		 */
		cvmvmconfig = read_c0_cvmvmconfig();
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = 0;

		/* Flush moved entries in new (root) context */
		local_flush_tlb_all();
		break;
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}
static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2	|
					       MIPS_CONF1_MD	|
					       MIPS_CONF1_PC	|
					       MIPS_CONF1_WR	|
					       MIPS_CONF1_CA	|
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA	|
					       MIPS_CONF3_BPG	|
					       MIPS_CONF3_ULRI	|
					       MIPS_CONF3_DSP	|
					       MIPS_CONF3_CTXTC	|
					       MIPS_CONF3_ITL	|
					       MIPS_CONF3_LPA	|
					       MIPS_CONF3_VEIC	|
					       MIPS_CONF3_VINT	|
					       MIPS_CONF3_SP	|
					       MIPS_CONF3_CDMM	|
					       MIPS_CONF3_MT	|
					       MIPS_CONF3_SM	|
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K	|
					       MIPS_CONF5_CV	|
					       MIPS_CONF5_MSAEN	|
					       MIPS_CONF5_UFE	|
					       MIPS_CONF5_FRE	|
					       MIPS_CONF5_SBRI	|
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* reset HTW registers */
	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}
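/*
 * Editor's note: the EBase write above seeds the architecturally read-only
 * CPUNum field (EBase[9:0]) with vcpu_id, so e.g. VCPU 3 resets with
 * EBase = 0xffffffff80000003 on a 64-bit host and can identify itself the
 * same way native SMP Linux does. The reset PC, 0xbfc00000 in CKSEG1,
 * mirrors the physical reset vector at 0x1fc00000.
 */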
static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}

static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}
static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	kvm_vz_acquire_htimer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}
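/*
 * Editor's note on ordering: requests are checked before the TLB is loaded
 * so that a pending KVM_REQ_TLB_FLUSH can zero vzguestid[] first and
 * kvm_vz_vcpu_load_tlb() then allocates a fresh GuestID; wired entries are
 * reloaded last, since writing them targets the guest TLB context set up by
 * the preceding load.
 */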
static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}