/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#ifdef CONFIG_CPU_LOONGSON64
#include "loongson_regs.h"
#endif

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
        if (sizeof(long) == 8 && cpu_has_ebase_wg)
                return read_gc0_ebase_64();
        else
                return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
        /*
         * First write with WG=1 to write upper bits, then write again in case
         * WG should be left at 0.
         * write_gc0_ebase_64() is no longer UNDEFINED since R6.
         */
        if (sizeof(long) == 8 &&
            (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
                write_gc0_ebase_64(v | MIPS_EBASE_WG);
                write_gc0_ebase_64(v);
        } else {
                write_gc0_ebase(v | MIPS_EBASE_WG);
                write_gc0_ebase(v);
        }
}

/*
 * These Config bits may be writable by the guest:
 * Config:      [K23, KU] (!TLB), K0
 * Config1:     (none)
 * Config2:     [TU, SU] (impl)
 * Config3:     ISAOnExc
 * Config4:     FTLBPageSize
 * Config5:     K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
        /* no need to be exact */
        return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

        /* Permit MSAEn changes if MSA supported and enabled */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /*
         * Permit guest FPU mode changes if FPU is enabled and the relevant
         * feature exists according to FIR register.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                if (cpu_has_ufr)
                        mask |= MIPS_CONF5_UFR;
                if (cpu_has_fre)
                        mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
        }

        return mask;
}

static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:      M, [MT]
 * Config1:     M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:     M
 * Config3:     M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *              VInt, SP, CDMM, MT, SM, TL]
 * Config4:     M, [VTLBSizeExt, MMUSizeExt]
 * Config5:     MRP
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

        /* Permit FPU to be present if FPU is supported */
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
                mask |= MIPS_CONF1_FP;

        return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
                MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                mask |= MIPS_CONF3_MSA;

        return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config6_guest_wrmask(vcpu) |
                LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
        /* VZ guest has already converted gva to gpa */
        return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
        clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
        clear_bit(priority, &vcpu->arch.pending_exceptions);
        set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
        /*
         * timer expiry is asynchronous to vcpu execution, so defer guest
         * cp0 accesses
         */
        kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
        /*
         * timer expiry is asynchronous to vcpu execution, so defer guest
         * cp0 accesses
         */
        kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
                                   struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;

        /*
         * interrupts are asynchronous to vcpu execution, so defer guest
         * cp0 accesses
         */
        kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
                                     struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;

        /*
         * interrupts are asynchronous to vcpu execution, so defer guest
         * cp0 accesses
         */
        kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                                 u32 cause)
{
        u32 irq = (priority < MIPS_EXC_MAX) ?
                kvm_priority_to_irq[priority] : 0;

        switch (priority) {
        case MIPS_EXC_INT_TIMER:
                set_gc0_cause(C_TI);
                break;

        case MIPS_EXC_INT_IO_1:
        case MIPS_EXC_INT_IO_2:
        case MIPS_EXC_INT_IPI_1:
        case MIPS_EXC_INT_IPI_2:
                if (cpu_has_guestctl2)
                        set_c0_guestctl2(irq);
                else
                        set_gc0_cause(irq);
                break;

        default:
                break;
        }

        clear_bit(priority, &vcpu->arch.pending_exceptions);
        return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                               u32 cause)
{
        u32 irq = (priority < MIPS_EXC_MAX) ?
                kvm_priority_to_irq[priority] : 0;

        switch (priority) {
        case MIPS_EXC_INT_TIMER:
                /*
                 * Explicitly clear the irq associated with Cause.IP[IPTI] if
                 * the GuestCtl2 virtual interrupt register is not supported
                 * or if not using GuestCtl2 Hardware Clear.
                 */
                if (cpu_has_guestctl2) {
                        if (!(read_c0_guestctl2() & (irq << 14)))
                                clear_c0_guestctl2(irq);
                } else {
                        clear_gc0_cause(irq);
                }
                break;

        case MIPS_EXC_INT_IO_1:
        case MIPS_EXC_INT_IO_2:
        case MIPS_EXC_INT_IPI_1:
        case MIPS_EXC_INT_IPI_2:
                /* Clear GuestCtl2.VIP irq if not using Hardware Clear */
                if (cpu_has_guestctl2) {
                        if (!(read_c0_guestctl2() & (irq << 14)))
                                clear_c0_guestctl2(irq);
                } else {
                        clear_gc0_cause(irq);
                }
                break;

        default:
                break;
        }

        clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
        return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:       Virtual CPU.
 *
 * Returns:     true if the VZ GTOffset & real guest CP0_Count should be used
 *              instead of software emulation of guest timer.
 *              false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
        if (kvm_mips_count_disabled(vcpu))
                return false;

        /* Chosen frequency must match real frequency */
        if (mips_hpt_frequency != vcpu->arch.count_hz)
                return false;

        /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
        if (current_cpu_data.gtoffset_mask != 0xffffffff)
                return false;

        return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:       Virtual CPU.
 * @compare:    CP0_Compare register value, restored by caller.
 * @cause:      CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
                                   u32 cause)
{
        /*
         * Avoid spurious counter interrupts by setting Guest CP0_Count to just
         * after Guest CP0_Compare.
         */
        write_c0_gtoffset(compare - read_c0_count());
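        /*
         * Illustrative note (our reading of the VZ GTOffset semantics, not
         * from the original comments): the guest sees
         * Guest.Count = Root.Count + GTOffset (mod 2^32), so with
         * GTOffset = compare - Root.Count the guest reads Count == compare,
         * the point just past a Compare match, leaving a full 2^32 cycle
         * wrap before the next timer interrupt.
         */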

        back_to_back_c0_hazard();
        write_gc0_cause(cause);
}

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:       Virtual CPU.
 * @compare:    CP0_Compare register value, restored by caller.
 * @cause:      CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
                                   u32 compare, u32 cause)
{
        u32 start_count, after_count;
        unsigned long flags;

        /*
         * Freeze the soft-timer and sync the guest CP0_Count with it. We do
         * this with interrupts disabled to avoid latency.
         */
        local_irq_save(flags);
        kvm_mips_freeze_hrtimer(vcpu, &start_count);
        write_c0_gtoffset(start_count - read_c0_count());
        local_irq_restore(flags);

        /* restore guest CP0_Cause, as TI may already be set */
        back_to_back_c0_hazard();
        write_gc0_cause(cause);

        /*
         * The above sequence isn't atomic and would result in lost timer
         * interrupts if we're not careful. Detect if a timer interrupt is due
         * and assert it.
         */
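        /*
         * Worked example of the wrap-safe check below (hypothetical values):
         * the unsigned comparison tests whether compare lies in the interval
         * (start_count, after_count] modulo 2^32. E.g. with
         * start_count = 0xfffffff0, after_count = 0x10 and compare = 0x4,
         * 0x10 - 0xfffffff0 = 0x20 exceeds 0x4 - 0xfffffff0 - 1 = 0x13, so
         * the Compare match that occurred during the switch is detected.
         */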
        back_to_back_c0_hazard();
        after_count = read_gc0_count();
        if (after_count - start_count > compare - start_count - 1)
                kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:       Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 cause, compare;

        compare = kvm_read_sw_gc0_compare(cop0);
        cause = kvm_read_sw_gc0_cause(cop0);

        write_gc0_compare(compare);
        _kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:       Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since hard timer won't remain active over preemption, preemption should be
 * disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
        u32 gctl0;

        gctl0 = read_c0_guestctl0();
        if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
                /* enable guest access to hard timer */
                write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

                _kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
                                       read_gc0_cause());
        }
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:       Virtual CPU.
 * @compare:    Pointer to write compare value to.
 * @cause:      Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
                                u32 *out_compare, u32 *out_cause)
{
        u32 cause, compare, before_count, end_count;
        ktime_t before_time;

        compare = read_gc0_compare();
        *out_compare = compare;

        before_time = ktime_get();

        /*
         * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
         * at which no pending timer interrupt is missing.
         */
        before_count = read_gc0_count();
        back_to_back_c0_hazard();
        cause = read_gc0_cause();
        *out_cause = cause;

        /*
         * Record a final CP0_Count which we will transfer to the soft-timer.
         * This is recorded *after* saving CP0_Cause, so we don't get any timer
         * interrupts from just after the final CP0_Count point.
         */
        back_to_back_c0_hazard();
        end_count = read_gc0_count();

        /*
         * The above sequence isn't atomic, so we could miss a timer interrupt
         * between reading CP0_Cause and end_count. Detect and record any timer
         * interrupt due between before_count and end_count.
         */
        if (end_count - before_count > compare - before_count - 1)
                kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

        /*
         * Restore soft-timer, ignoring a small amount of negative drift due to
         * delay between freeze_hrtimer and setting CP0_GTOffset.
         */
        kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:       Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 gctl0, compare, cause;

        gctl0 = read_c0_guestctl0();
        if (gctl0 & MIPS_GCTL0_GT) {
                /* disable guest use of hard timer */
                write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

                /* save hard timer state */
                _kvm_vz_save_htimer(vcpu, &compare, &cause);
        } else {
                compare = read_gc0_compare();
                cause = read_gc0_cause();
        }

        /* save timer-related state to VCPU context */
        kvm_write_sw_gc0_cause(cop0, cause);
        kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:       Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
        u32 gctl0, compare, cause;

        preempt_disable();
        gctl0 = read_c0_guestctl0();
        if (gctl0 & MIPS_GCTL0_GT) {
                /* disable guest use of timer */
                write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

                /* switch to soft timer */
                _kvm_vz_save_htimer(vcpu, &compare, &cause);

                /* leave soft timer in usable state */
                _kvm_vz_restore_stimer(vcpu, compare, cause);
        }
        preempt_enable();
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:       32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:     Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
        if (inst.spec3_format.opcode != spec3_op)
                return false;

        switch (inst.spec3_format.func) {
        case lwle_op:
        case lwre_op:
        case cachee_op:
        case sbe_op:
        case she_op:
        case sce_op:
        case swe_op:
        case swle_op:
        case swre_op:
        case prefe_op:
        case lbue_op:
        case lhue_op:
        case lbe_op:
        case lhe_op:
        case lle_op:
        case lwe_op:
                return true;
        default:
                return false;
        }
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:       KVM VCPU state.
 * @am:         3-bit encoded access mode.
 * @eu:         Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:     Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
        u32 am_lookup;
        int err;

        /*
         * Interpret access control mode. We assume address errors will already
         * have been caught by the guest, leaving us with:
         *      AM      UM  SM  KM  31..24 23..16
         * UK    0 000          Unm   0      0
         * MK    1 001          TLB   1
         * MSK   2 010      TLB TLB   1
         * MUSK  3 011  TLB TLB TLB   1
         * MUSUK 4 100  TLB TLB Unm   0      1
         * USK   5 101      Unm Unm   0      0
         * -     6 110                0      0
         * UUSK  7 111  Unm Unm Unm   0      0
         *
         * We shift a magic value by AM across the sign bit to find if always
         * TLB mapped, and if not shift by 8 again to find if it depends on KM.
         */
        am_lookup = 0x70080000 << am;
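        /*
         * Worked example: for AM=4 (MUSUK), 0x70080000 << 4 = 0x00800000 is
         * positive (bit 31 clear), so the segment is not unconditionally
         * mapped; a further << 8 gives 0x80000000, which is negative, so
         * mappedness depends on the mode checks below. For AM=1..3 (MK, MSK,
         * MUSK) the first shift already reaches the sign bit.
         */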
        if ((s32)am_lookup < 0) {
                /*
                 * MK, MSK, MUSK
                 * Always TLB mapped, unless SegCtl.EU && ERL
                 */
                if (!eu || !(read_gc0_status() & ST0_ERL))
                        return true;
        } else {
                am_lookup <<= 8;
                if ((s32)am_lookup < 0) {
                        union mips_instruction inst;
                        unsigned int status;
                        u32 *opc;

                        /*
                         * MUSUK
                         * TLB mapped if not in kernel mode
                         */
                        status = read_gc0_status();
                        if (!(status & (ST0_EXL | ST0_ERL)) &&
                            (status & ST0_KSU))
                                return true;
                        /*
                         * EVA access instructions in kernel
                         * mode access user address space.
                         */
                        opc = (u32 *)vcpu->arch.pc;
                        if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
                                opc += 1;
                        err = kvm_get_badinstr(opc, vcpu, &inst.word);
                        if (!err && is_eva_access(inst))
                                return true;
                }
        }

        return false;
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:       KVM VCPU state.
 * @gva:        Guest virtual address to convert.
 * @gpa:        Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:     0 on success.
 *              -errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
                             unsigned long *gpa)
{
        u32 gva32 = gva;
        unsigned long segctl;

        if ((long)gva == (s32)gva32) {
                /* Handle canonical 32-bit virtual address */
                if (cpu_guest_has_segments) {
                        unsigned long mask, pa;

                        switch (gva32 >> 29) {
                        case 0:
                        case 1: /* CFG5 (1GB) */
                                segctl = read_gc0_segctl2() >> 16;
                                mask = (unsigned long)0xfc0000000ull;
                                break;
                        case 2:
                        case 3: /* CFG4 (1GB) */
                                segctl = read_gc0_segctl2();
                                mask = (unsigned long)0xfc0000000ull;
                                break;
                        case 4: /* CFG3 (512MB) */
                                segctl = read_gc0_segctl1() >> 16;
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 5: /* CFG2 (512MB) */
                                segctl = read_gc0_segctl1();
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 6: /* CFG1 (512MB) */
                                segctl = read_gc0_segctl0() >> 16;
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 7: /* CFG0 (512MB) */
                                segctl = read_gc0_segctl0();
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        default:
                                /*
                                 * GCC 4.9 isn't smart enough to figure out that
                                 * segctl and mask are always initialised.
                                 */
                                unreachable();
                        }

                        if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
                                             segctl & 0x0008))
                                goto tlb_mapped;

                        /* Unmapped, find guest physical address */
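                        /*
                         * Worked example (hypothetical values): the physical
                         * base is the segctl PA field shifted left by 20 and
                         * masked to the segment size, with the remaining low
                         * bits of gva32 kept as the offset. E.g. a PA field
                         * of 0 with gva32 = 0x80001000 (a 512MB CFG3 segment)
                         * yields gpa = 0x00001000.
                         */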
                        pa = (segctl << 20) & mask;
                        pa |= gva32 & ~mask;
                        *gpa = pa;
                        return 0;
                } else if ((s32)gva32 < (s32)0xc0000000) {
                        /* legacy unmapped KSeg0 or KSeg1 */
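                        /*
                         * E.g. gva 0x80001000 (KSeg0) and 0xa0001000 (KSeg1)
                         * both resolve to gpa 0x00001000.
                         */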
                        *gpa = gva32 & 0x1fffffff;
                        return 0;
                }
#ifdef CONFIG_64BIT
        } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
                /* XKPHYS */
                if (cpu_guest_has_segments) {
                        /*
                         * Each of the 8 regions can be overridden by SegCtl2.XR
                         * to use SegCtl1.XAM.
                         */
                        segctl = read_gc0_segctl2();
                        if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
                                segctl = read_gc0_segctl1();
                                if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
                                                     0))
                                        goto tlb_mapped;
                        }

                }
                /*
                 * Traditionally fully unmapped.
                 * Bits 61:59 specify the CCA, which we can just mask off here.
                 * Bits 58:PABITS should be zero, but we shouldn't have got
                 * here if they weren't.
                 */
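                /*
                 * E.g. gva 0x9800000000001000 (XKPHYS, CCA=3) masks down to
                 * gpa 0x0000000000001000.
                 */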
                *gpa = gva & 0x07ffffffffffffff;
                return 0;
#endif
        }

tlb_mapped:
        return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:       KVM VCPU state.
 * @badvaddr:   Root BadVAddr.
 * @gpa:        Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:     0 on success.
 *              -errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
                                  unsigned long *gpa)
{
        unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
                                 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

        /* If BadVAddr is GPA, then all is well in the world */
        if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
                *gpa = badvaddr;
                return 0;
        }

        /* Otherwise we'd expect it to be GVA ... */
        if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
                 "Unexpected gexccode %#x\n", gexccode))
                return -EINVAL;

        /* ... and we need to perform the GVA->GPA translation in software */
        return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
        u32 *opc = (u32 *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 inst = 0;

        /*
         *  Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        kvm_get_badinstr(opc, vcpu, &inst);

        kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
                exccode, opc, inst, badvaddr,
                read_gc0_status());
        kvm_arch_vcpu_dump_regs(vcpu);
        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        return RESUME_HOST;
}

static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
        /* Mask off unused bits */
        unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
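        /*
         * Note (derived from the constants here, not the architecture
         * manual): the base mask keeps address bits 31:12 plus the S and VL
         * flags; PG_ELPA extends the writable address field up to bit 55,
         * and MVH additionally exposes VH.
         */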

        if (read_gc0_pagegrain() & PG_ELPA)
                mask |= 0x00ffffff00000000ull;
        if (cpu_guest_has_mvh)
                mask |= MIPS_MAAR_VH;

        /* Set or clear VH */
        if (op == mtc_op) {
                /* clear VH */
                val &= ~MIPS_MAAR_VH;
        } else if (op == dmtc_op) {
                /* set VH to match VL */
                val &= ~MIPS_MAAR_VH;
                if (val & MIPS_MAAR_VL)
                        val |= MIPS_MAAR_VH;
        }

        return val & mask;
}

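/*
 * A sketch of the MAARI emulation semantics implemented below: writing the
 * all-ones index (MIPS_MAARI_INDEX) selects the highest valid MAAR index,
 * an in-range index is stored as-is, and an out-of-range write leaves the
 * software MAARI unchanged.
 */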
static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        val &= MIPS_MAARI_INDEX;
        if (val == MIPS_MAARI_INDEX)
                kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
        else if (val < ARRAY_SIZE(vcpu->arch.maar))
                kvm_write_sw_gc0_maari(cop0, val);
}

static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
                                              u32 *opc, u32 cause,
                                              struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
        unsigned long val;

        /*
         * Update PC and hold onto the current PC in case there is
         * an error and we want to roll back the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        if (inst.co_format.co) {
                switch (inst.co_format.func) {
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                default:
                        er = EMULATE_FAIL;
                }
        } else {
                rt = inst.c0r_format.rt;
                rd = inst.c0r_format.rd;
                sel = inst.c0r_format.sel;

                switch (inst.c0r_format.rs) {
                case dmfc_op:
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        if (rd == MIPS_CP0_COUNT &&
                            sel == 0) {                 /* Count */
                                val = kvm_mips_read_count(vcpu);
                        } else if (rd == MIPS_CP0_COMPARE &&
                                   sel == 0) {          /* Compare */
                                val = read_gc0_compare();
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 0) {          /* LLAddr */
                                if (cpu_guest_has_rw_llb)
                                        val = read_gc0_lladdr() &
                                                MIPS_LLADDR_LLB;
                                else
                                        val = 0;
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 1 &&          /* MAAR */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                /* MAARI must be in range */
                                BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
                                                ARRAY_SIZE(vcpu->arch.maar));
                                val = vcpu->arch.maar[
                                        kvm_read_sw_gc0_maari(cop0)];
                        } else if ((rd == MIPS_CP0_PRID &&
                                    (sel == 0 ||        /* PRid */
                                     sel == 2 ||        /* CDMMBase */
                                     sel == 3)) ||      /* CMGCRBase */
                                   (rd == MIPS_CP0_STATUS &&
                                    (sel == 2 ||        /* SRSCtl */
                                     sel == 3)) ||      /* SRSMap */
                                   (rd == MIPS_CP0_CONFIG &&
                                    (sel == 6 ||        /* Config6 */
                                     sel == 7)) ||      /* Config7 */
                                   (rd == MIPS_CP0_LLADDR &&
                                    (sel == 2) &&       /* MAARI */
                                    cpu_guest_has_maar &&
                                    !cpu_guest_has_dyn_maar) ||
                                   (rd == MIPS_CP0_ERRCTL &&
                                    (sel == 0))) {      /* ErrCtl */
                                val = cop0->reg[rd][sel];
#ifdef CONFIG_CPU_LOONGSON64
                        } else if (rd == MIPS_CP0_DIAG &&
                                   (sel == 0)) {        /* Diag */
                                val = cop0->reg[rd][sel];
#endif
                        } else {
                                val = 0;
                                er = EMULATE_FAIL;
                        }

                        if (er != EMULATE_FAIL) {
                                /* Sign extend */
                                if (inst.c0r_format.rs == mfc_op)
                                        val = (int)val;
                                vcpu->arch.gprs[rt] = val;
                        }

                        trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
                                        KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
                                      KVM_TRACE_COP0(rd, sel), val);
                        break;

                case dmtc_op:
                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        val = vcpu->arch.gprs[rt];
                        trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
                                        KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
                                      KVM_TRACE_COP0(rd, sel), val);

                        if (rd == MIPS_CP0_COUNT &&
                            sel == 0) {                 /* Count */
                                kvm_vz_lose_htimer(vcpu);
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                        } else if (rd == MIPS_CP0_COMPARE &&
                                   sel == 0) {          /* Compare */
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt],
                                                       true);
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 0) {          /* LLAddr */
                                /*
                                 * P5600 generates GPSI on guest MTC0 LLAddr.
                                 * Only allow the guest to clear LLB.
                                 */
                                if (cpu_guest_has_rw_llb &&
                                    !(val & MIPS_LLADDR_LLB))
                                        write_gc0_lladdr(0);
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 1 &&          /* MAAR */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                val = mips_process_maar(inst.c0r_format.rs,
                                                        val);

                                /* MAARI must be in range */
                                BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
                                                ARRAY_SIZE(vcpu->arch.maar));
                                vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
                                                                        val;
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   (sel == 2) &&        /* MAARI */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                kvm_write_maari(vcpu, val);
                        } else if (rd == MIPS_CP0_CONFIG &&
                                   (sel == 6)) {
                                cop0->reg[rd][sel] = (int)val;
                        } else if (rd == MIPS_CP0_ERRCTL &&
                                   (sel == 0)) {        /* ErrCtl */
                                /* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
                        } else if (rd == MIPS_CP0_DIAG &&
                                   (sel == 0)) {        /* Diag */
                                unsigned long flags;

                                local_irq_save(flags);
                                if (val & LOONGSON_DIAG_BTB) {
                                        /* Flush BTB */
                                        set_c0_diag(LOONGSON_DIAG_BTB);
                                }
                                if (val & LOONGSON_DIAG_ITLB) {
                                        /* Flush ITLB */
                                        set_c0_diag(LOONGSON_DIAG_ITLB);
                                }
                                if (val & LOONGSON_DIAG_DTLB) {
                                        /* Flush DTLB */
                                        set_c0_diag(LOONGSON_DIAG_DTLB);
                                }
                                if (val & LOONGSON_DIAG_VTLB) {
                                        /* Flush VTLB */
                                        kvm_loongson_clear_guest_vtlb();
                                }
                                if (val & LOONGSON_DIAG_FTLB) {
                                        /* Flush FTLB */
                                        kvm_loongson_clear_guest_ftlb();
                                }
                                local_irq_restore(flags);
#endif
                        } else {
                                er = EMULATE_FAIL;
                        }
                        break;

                default:
                        er = EMULATE_FAIL;
                        break;
                }
        }
        /* Roll back PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL) {
                kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
                        curr_pc, __func__, inst.word);

                vcpu->arch.pc = curr_pc;
        }

        return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
                                               u32 *opc, u32 cause,
                                               struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        u32 cache, op_inst, op, base;
        s16 offset;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long va, curr_pc;

        /*
         * Update PC and hold onto the current PC in case there is
         * an error and we want to roll back the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        base = inst.i_format.rs;
        op_inst = inst.i_format.rt;
        if (cpu_has_mips_r6)
                offset = inst.spec3_format.simmediate;
        else
                offset = inst.i_format.simmediate;
        cache = op_inst & CacheOp_Cache;
        op = op_inst & CacheOp_Op;

        va = arch->gprs[base] + offset;

        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                  cache, op, base, arch->gprs[base], offset);

        /* Secondary or tertiary cache ops ignored */
        if (cache != Cache_I && cache != Cache_D)
                return EMULATE_DONE;

        switch (op_inst) {
        case Index_Invalidate_I:
                flush_icache_line_indexed(va);
                return EMULATE_DONE;
        case Index_Writeback_Inv_D:
                flush_dcache_line_indexed(va);
                return EMULATE_DONE;
        case Hit_Invalidate_I:
        case Hit_Invalidate_D:
        case Hit_Writeback_Inv_D:
                if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
                        /* We can just flush entire icache */
                        local_flush_icache_range(0, 0);
                        return EMULATE_DONE;
                }

                /* So far, other platforms support guest hit cache ops */
                break;
        default:
                break;
        }

        kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
                offset);
        /* Roll back PC */
        vcpu->arch.pc = curr_pc;

        return EMULATE_FAIL;
}

#ifdef CONFIG_CPU_LOONGSON64
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
                                              u32 *opc, u32 cause,
                                              struct kvm_vcpu *vcpu)
{
        unsigned int rs, rd;
        unsigned int hostcfg;
        unsigned long curr_pc;
        enum emulation_result er = EMULATE_DONE;

        /*
         * Update PC and hold onto the current PC in case there is
         * an error and we want to roll back the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        rs = inst.loongson3_lscsr_format.rs;
        rd = inst.loongson3_lscsr_format.rd;
        switch (inst.loongson3_lscsr_format.fr) {
        case 0x8:  /* Read CPUCFG */
                ++vcpu->stat.vz_cpucfg_exits;
                hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);

                switch (vcpu->arch.gprs[rs]) {
                case LOONGSON_CFG0:
                        vcpu->arch.gprs[rd] = 0x14c000;
                        break;
                case LOONGSON_CFG1:
                        hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
                                    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
                                    LOONGSON_CFG1_SFBP);
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                case LOONGSON_CFG2:
                        hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
                                    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                case LOONGSON_CFG3:
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                default:
                        /* Don't export any other advanced features to guest */
                        vcpu->arch.gprs[rd] = 0;
                        break;
                }
                break;

        default:
                kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
                        inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
                er = EMULATE_FAIL;
                break;
        }

        /* Roll back PC only if emulation was unsuccessful */
1201         if (er == EMULATE_FAIL) {
1202                 kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
1203                         curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
1204
1205                 vcpu->arch.pc = curr_pc;
1206         }
1207
1208         return er;
1209 }
1210 #endif
1211
1212 static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
1213                                                      struct kvm_vcpu *vcpu)
1214 {
1215         enum emulation_result er = EMULATE_DONE;
1216         struct kvm_vcpu_arch *arch = &vcpu->arch;
1217         union mips_instruction inst;
1218         int rd, rt, sel;
1219         int err;
1220
1221         /*
1222          *  Fetch the instruction.
1223          */
1224         if (cause & CAUSEF_BD)
1225                 opc += 1;
1226         err = kvm_get_badinstr(opc, vcpu, &inst.word);
1227         if (err)
1228                 return EMULATE_FAIL;
1229
1230         switch (inst.r_format.opcode) {
1231         case cop0_op:
1232                 er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
1233                 break;
1234 #ifndef CONFIG_CPU_MIPSR6
1235         case cache_op:
1236                 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1237                 er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
1238                 break;
1239 #endif
1240 #ifdef CONFIG_CPU_LOONGSON64
1241         case lwc2_op:
1242                 er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
1243                 break;
1244 #endif
1245         case spec3_op:
1246                 switch (inst.spec3_format.func) {
1247 #ifdef CONFIG_CPU_MIPSR6
1248                 case cache6_op:
1249                         trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1250                         er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
1251                         break;
1252 #endif
1253                 case rdhwr_op:
1254                         if (inst.r_format.rs || (inst.r_format.re >> 3))
1255                                 goto unknown;
1256
1257                         rd = inst.r_format.rd;
1258                         rt = inst.r_format.rt;
1259                         sel = inst.r_format.re & 0x7;
1260
1261                         switch (rd) {
1262                         case MIPS_HWR_CC:       /* Read count register */
1263                                 arch->gprs[rt] =
1264                                         (long)(int)kvm_mips_read_count(vcpu);
1265                                 break;
1266                         default:
1267                                 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
1268                                               KVM_TRACE_HWR(rd, sel), 0);
1269                                 goto unknown;
1270                         }
1271
1272                         trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
1273                                       KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
1274
1275                         er = update_pc(vcpu, cause);
1276                         break;
1277                 default:
1278                         goto unknown;
1279                 }
1280                 break;
1281 unknown:
1282
1283         default:
1284                 kvm_err("GPSI exception not supported (%p/%#x)\n",
1285                                 opc, inst.word);
1286                 kvm_arch_vcpu_dump_regs(vcpu);
1287                 er = EMULATE_FAIL;
1288                 break;
1289         }
1290
1291         return er;
1292 }
1293
1294 static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
1295                                                      struct kvm_vcpu *vcpu)
1296 {
1297         enum emulation_result er = EMULATE_DONE;
1298         struct kvm_vcpu_arch *arch = &vcpu->arch;
1299         union mips_instruction inst;
1300         int err;
1301
1302         /*
1303          *  Fetch the instruction.
1304          */
1305         if (cause & CAUSEF_BD)
1306                 opc += 1;
1307         err = kvm_get_badinstr(opc, vcpu, &inst.word);
1308         if (err)
1309                 return EMULATE_FAIL;
1310
1311         /* complete MTC0 on behalf of guest and advance EPC */
1312         if (inst.c0r_format.opcode == cop0_op &&
1313             inst.c0r_format.rs == mtc_op &&
1314             inst.c0r_format.z == 0) {
1315                 int rt = inst.c0r_format.rt;
1316                 int rd = inst.c0r_format.rd;
1317                 int sel = inst.c0r_format.sel;
1318                 unsigned int val = arch->gprs[rt];
1319                 unsigned int old_val, change;
1320
1321                 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
1322                               val);
1323
1324                 if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1325                         /* FR bit should read as zero if no FPU */
1326                         if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1327                                 val &= ~(ST0_CU1 | ST0_FR);
1328
1329                         /*
1330                          * Also don't allow FR to be set if host doesn't support
1331                          * it.
1332                          */
1333                         if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
1334                                 val &= ~ST0_FR;
1335
1336                         old_val = read_gc0_status();
1337                         change = val ^ old_val;
1338
1339                         if (change & ST0_FR) {
1340                                 /*
1341                                  * FPU and Vector register state is made
1342                                  * UNPREDICTABLE by a change of FR, so don't
1343                                  * even bother saving it.
1344                                  */
1345                                 kvm_drop_fpu(vcpu);
1346                         }
1347
1348                         /*
1349                          * If MSA state is already live, it is undefined how it
1350                          * interacts with FR=0 FPU state, and we don't want to
1351                          * hit reserved instruction exceptions trying to save
1352                          * the MSA state later when CU=1 && FR=1, so play it
1353                          * safe and save it first.
1354                          */
1355                         if (change & ST0_CU1 && !(val & ST0_FR) &&
1356                             vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1357                                 kvm_lose_fpu(vcpu);
1358
1359                         write_gc0_status(val);
1360                 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1361                         u32 old_cause = read_gc0_cause();
1362                         u32 change = old_cause ^ val;
1363
1364                         /* DC bit enabling/disabling timer? */
1365                         if (change & CAUSEF_DC) {
1366                                 if (val & CAUSEF_DC) {
1367                                         kvm_vz_lose_htimer(vcpu);
1368                                         kvm_mips_count_disable_cause(vcpu);
1369                                 } else {
1370                                         kvm_mips_count_enable_cause(vcpu);
1371                                 }
1372                         }
1373
1374                         /* Only certain bits are RW to the guest */
1375                         change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
1376                                    CAUSEF_IP0 | CAUSEF_IP1);
1377
1378                         /* WP can only be cleared */
1379                         change &= ~CAUSEF_WP | old_cause;
1380
1381                         write_gc0_cause(old_cause ^ change);
1382                 } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
1383                         write_gc0_intctl(val);
1384                 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1385                         old_val = read_gc0_config5();
1386                         change = val ^ old_val;
1387                         /* Handle changes in FPU/MSA modes */
1388                         preempt_disable();
1389
1390                         /*
1391                          * Propagate FRE changes immediately if the FPU
1392                          * context is already loaded.
1393                          */
1394                         if (change & MIPS_CONF5_FRE &&
1395                             vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1396                                 change_c0_config5(MIPS_CONF5_FRE, val);
1397
1398                         preempt_enable();
1399
1400                         val = old_val ^
1401                                 (change & kvm_vz_config5_guest_wrmask(vcpu));
1402                         write_gc0_config5(val);
1403                 } else {
1404                         kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
1405                             opc, inst.word);
1406                         er = EMULATE_FAIL;
1407                 }
1408
1409                 if (er != EMULATE_FAIL)
1410                         er = update_pc(vcpu, cause);
1411         } else {
1412                 kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
1413                         opc, inst.word);
1414                 er = EMULATE_FAIL;
1415         }
1416
1417         return er;
1418 }
1419
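/*
 * Illustrative sketch (added for exposition; the helper name is
 * hypothetical and nothing calls it): the Cause handling in
 * kvm_trap_vz_handle_gsfc() above relies on XOR masking so that only
 * whitelisted bits can change, and so that WP can be cleared by the
 * guest but never set. The same algebra on plain values:
 */
static u32 __maybe_unused example_gsfc_cause_rmw(u32 old_cause, u32 val)
{
	u32 change = old_cause ^ val;

	/* Only certain bits are RW to the guest */
	change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
		   CAUSEF_IP0 | CAUSEF_IP1);

	/*
	 * WP can only be cleared: if old WP=1, old_cause keeps the WP
	 * bit in 'change', so writing 0 still clears it; if old WP=0,
	 * ~CAUSEF_WP masks out any attempt to set it.
	 */
	change &= ~CAUSEF_WP | old_cause;

	/* Flip only the permitted bits */
	return old_cause ^ change;
}
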
1420 static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
1421                                                      struct kvm_vcpu *vcpu)
1422 {
1423         /*
1424          * Presumably this is due to MC (guest mode change), so let's trace some
1425          * relevant info.
1426          */
1427         trace_kvm_guest_mode_change(vcpu);
1428
1429         return EMULATE_DONE;
1430 }
1431
1432 static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
1433                                                    struct kvm_vcpu *vcpu)
1434 {
1435         enum emulation_result er;
1436         union mips_instruction inst;
1437         unsigned long curr_pc;
1438         int err;
1439
1440         if (cause & CAUSEF_BD)
1441                 opc += 1;
1442         err = kvm_get_badinstr(opc, vcpu, &inst.word);
1443         if (err)
1444                 return EMULATE_FAIL;
1445
1446         /*
1447          * Update PC and hold onto current PC in case there is
1448          * an error and we want to rollback the PC
1449          */
1450         curr_pc = vcpu->arch.pc;
1451         er = update_pc(vcpu, cause);
1452         if (er == EMULATE_FAIL)
1453                 return er;
1454
1455         er = kvm_mips_emul_hypcall(vcpu, inst);
1456         if (er == EMULATE_FAIL)
1457                 vcpu->arch.pc = curr_pc;
1458
1459         return er;
1460 }
1461
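/*
 * Illustrative sketch (added for exposition; the function and its
 * 'emulate' callback are hypothetical): the save/advance/rollback
 * idiom used by kvm_trap_vz_handle_hc() above. The PC is advanced
 * first so a successful hypercall resumes after the HYPCALL
 * instruction, and restored if emulation fails.
 */
static enum emulation_result __maybe_unused
example_emulate_with_pc_rollback(struct kvm_vcpu *vcpu, u32 cause,
			enum emulation_result (*emulate)(struct kvm_vcpu *))
{
	unsigned long curr_pc = vcpu->arch.pc;	/* held for rollback */
	enum emulation_result er = update_pc(vcpu, cause);

	if (er == EMULATE_FAIL)
		return er;

	er = emulate(vcpu);			/* hypothetical step */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;	/* undo the advance */
	return er;
}
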
1462 static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
1463                                                         u32 cause,
1464                                                         u32 *opc,
1465                                                         struct kvm_vcpu *vcpu)
1466 {
1467         u32 inst;
1468
1469         /*
1470          *  Fetch the instruction.
1471          */
1472         if (cause & CAUSEF_BD)
1473                 opc += 1;
1474         kvm_get_badinstr(opc, vcpu, &inst);
1475
1476         kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x  Status: %#x\n",
1477                 gexccode, opc, inst, read_gc0_status());
1478
1479         return EMULATE_FAIL;
1480 }
1481
1482 static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
1483 {
1484         u32 *opc = (u32 *) vcpu->arch.pc;
1485         u32 cause = vcpu->arch.host_cp0_cause;
1486         enum emulation_result er = EMULATE_DONE;
1487         u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
1488                         MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
1489         int ret = RESUME_GUEST;
1490
1491         trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
1492         switch (gexccode) {
1493         case MIPS_GCTL0_GEXC_GPSI:
1494                 ++vcpu->stat.vz_gpsi_exits;
1495                 er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
1496                 break;
1497         case MIPS_GCTL0_GEXC_GSFC:
1498                 ++vcpu->stat.vz_gsfc_exits;
1499                 er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
1500                 break;
1501         case MIPS_GCTL0_GEXC_HC:
1502                 ++vcpu->stat.vz_hc_exits;
1503                 er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
1504                 break;
1505         case MIPS_GCTL0_GEXC_GRR:
1506                 ++vcpu->stat.vz_grr_exits;
1507                 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1508                                                        vcpu);
1509                 break;
1510         case MIPS_GCTL0_GEXC_GVA:
1511                 ++vcpu->stat.vz_gva_exits;
1512                 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1513                                                        vcpu);
1514                 break;
1515         case MIPS_GCTL0_GEXC_GHFC:
1516                 ++vcpu->stat.vz_ghfc_exits;
1517                 er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
1518                 break;
1519         case MIPS_GCTL0_GEXC_GPA:
1520                 ++vcpu->stat.vz_gpa_exits;
1521                 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1522                                                        vcpu);
1523                 break;
1524         default:
1525                 ++vcpu->stat.vz_resvd_exits;
1526                 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1527                                                        vcpu);
1528                 break;
1529
1530         }
1531
1532         if (er == EMULATE_DONE) {
1533                 ret = RESUME_GUEST;
1534         } else if (er == EMULATE_HYPERCALL) {
1535                 ret = kvm_mips_handle_hypcall(vcpu);
1536         } else {
1537                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1538                 ret = RESUME_HOST;
1539         }
1540         return ret;
1541 }
1542
1543 /**
1544  * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1545  * @vcpu:       Virtual CPU context.
1546  *
1547  * Handle when the guest attempts to use a coprocessor which hasn't been allowed
1548  * by the root context.
1549  */
1550 static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
1551 {
1552         u32 cause = vcpu->arch.host_cp0_cause;
1553         enum emulation_result er = EMULATE_FAIL;
1554         int ret = RESUME_GUEST;
1555
1556         if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
1557                 /*
1558                  * If guest FPU not present, the FPU operation should have been
1559                  * treated as a reserved instruction!
1560                  * If FPU already in use, we shouldn't get this at all.
1561                  */
1562                 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
1563                             vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1564                         preempt_enable();
1565                         return EMULATE_FAIL;
1566                 }
1567
1568                 kvm_own_fpu(vcpu);
1569                 er = EMULATE_DONE;
1570         }
1571         /* other coprocessors not handled */
1572
1573         switch (er) {
1574         case EMULATE_DONE:
1575                 ret = RESUME_GUEST;
1576                 break;
1577
1578         case EMULATE_FAIL:
1579                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1580                 ret = RESUME_HOST;
1581                 break;
1582
1583         default:
1584                 BUG();
1585         }
1586         return ret;
1587 }
1588
1589 /**
1590  * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1591  * @vcpu:       Virtual CPU context.
1592  *
1593  * Handle when the guest attempts to use MSA when it is disabled in the root
1594  * context.
1595  */
1596 static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
1597 {
1598         /*
1599          * If MSA not present or not exposed to guest or FR=0, the MSA operation
1600          * should have been treated as a reserved instruction!
1601          * Same if CU1=1, FR=0.
1602          * If MSA already in use, we shouldn't get this at all.
1603          */
1604         if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
1605             (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
1606             !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
1607             vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1608                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1609                 return RESUME_HOST;
1610         }
1611
1612         kvm_own_msa(vcpu);
1613
1614         return RESUME_GUEST;
1615 }
1616
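/*
 * Added commentary on the masked compare above:
 * (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 is true exactly
 * when CU1=1 and FR=0, i.e. the FPU is enabled in FR=0 mode. MSA
 * requires FR=1, so in that state MSA instructions are reserved and a
 * guest MSA-disabled exception indicates an internal error.
 */
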
1617 static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
1618 {
1619         struct kvm_run *run = vcpu->run;
1620         u32 *opc = (u32 *) vcpu->arch.pc;
1621         u32 cause = vcpu->arch.host_cp0_cause;
1622         ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1623         union mips_instruction inst;
1624         enum emulation_result er = EMULATE_DONE;
1625         int err, ret = RESUME_GUEST;
1626
1627         if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
1628                 /* A code fetch fault doesn't count as an MMIO */
1629                 if (kvm_is_ifetch_fault(&vcpu->arch)) {
1630                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1631                         return RESUME_HOST;
1632                 }
1633
1634                 /* Fetch the instruction */
1635                 if (cause & CAUSEF_BD)
1636                         opc += 1;
1637                 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1638                 if (err) {
1639                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1640                         return RESUME_HOST;
1641                 }
1642
1643                 /* Treat as MMIO */
1644                 er = kvm_mips_emulate_load(inst, cause, vcpu);
1645                 if (er == EMULATE_FAIL) {
1646                         kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1647                                 opc, badvaddr);
1648                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1649                 }
1650         }
1651
1652         if (er == EMULATE_DONE) {
1653                 ret = RESUME_GUEST;
1654         } else if (er == EMULATE_DO_MMIO) {
1655                 run->exit_reason = KVM_EXIT_MMIO;
1656                 ret = RESUME_HOST;
1657         } else {
1658                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1659                 ret = RESUME_HOST;
1660         }
1661         return ret;
1662 }
1663
1664 static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
1665 {
1666         struct kvm_run *run = vcpu->run;
1667         u32 *opc = (u32 *) vcpu->arch.pc;
1668         u32 cause = vcpu->arch.host_cp0_cause;
1669         ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1670         union mips_instruction inst;
1671         enum emulation_result er = EMULATE_DONE;
1672         int err;
1673         int ret = RESUME_GUEST;
1674
1675         /* Just try the access again if we couldn't do the translation */
1676         if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
1677                 return RESUME_GUEST;
1678         vcpu->arch.host_cp0_badvaddr = badvaddr;
1679
1680         if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
1681                 /* Fetch the instruction */
1682                 if (cause & CAUSEF_BD)
1683                         opc += 1;
1684                 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1685                 if (err) {
1686                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1687                         return RESUME_HOST;
1688                 }
1689
1690                 /* Treat as MMIO */
1691                 er = kvm_mips_emulate_store(inst, cause, vcpu);
1692                 if (er == EMULATE_FAIL) {
1693                         kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1694                                 opc, badvaddr);
1695                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1696                 }
1697         }
1698
1699         if (er == EMULATE_DONE) {
1700                 ret = RESUME_GUEST;
1701         } else if (er == EMULATE_DO_MMIO) {
1702                 run->exit_reason = KVM_EXIT_MMIO;
1703                 ret = RESUME_HOST;
1704         } else {
1705                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1706                 ret = RESUME_HOST;
1707         }
1708         return ret;
1709 }
1710
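/*
 * Illustrative userspace sketch (added for exposition; device_read()
 * and device_write() are hypothetical VMM helpers, and this is the
 * generic KVM run-loop ABI rather than code from this file): when the
 * two handlers above return RESUME_HOST with exit_reason set to
 * KVM_EXIT_MMIO, the VMM performs the access described by run->mmio
 * and re-enters the guest, which completes the pending load or store.
 */
#if 0	/* userspace, shown for context only */
void handle_mmio_exit(struct kvm_run *run)
{
	if (run->mmio.is_write)
		device_write(run->mmio.phys_addr, run->mmio.data,
			     run->mmio.len);
	else
		device_read(run->mmio.phys_addr, run->mmio.data,
			    run->mmio.len);
	/* the next KVM_RUN resumes the guest with the data in place */
}
#endif
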
1711 static u64 kvm_vz_get_one_regs[] = {
1712         KVM_REG_MIPS_CP0_INDEX,
1713         KVM_REG_MIPS_CP0_ENTRYLO0,
1714         KVM_REG_MIPS_CP0_ENTRYLO1,
1715         KVM_REG_MIPS_CP0_CONTEXT,
1716         KVM_REG_MIPS_CP0_PAGEMASK,
1717         KVM_REG_MIPS_CP0_PAGEGRAIN,
1718         KVM_REG_MIPS_CP0_WIRED,
1719         KVM_REG_MIPS_CP0_HWRENA,
1720         KVM_REG_MIPS_CP0_BADVADDR,
1721         KVM_REG_MIPS_CP0_COUNT,
1722         KVM_REG_MIPS_CP0_ENTRYHI,
1723         KVM_REG_MIPS_CP0_COMPARE,
1724         KVM_REG_MIPS_CP0_STATUS,
1725         KVM_REG_MIPS_CP0_INTCTL,
1726         KVM_REG_MIPS_CP0_CAUSE,
1727         KVM_REG_MIPS_CP0_EPC,
1728         KVM_REG_MIPS_CP0_PRID,
1729         KVM_REG_MIPS_CP0_EBASE,
1730         KVM_REG_MIPS_CP0_CONFIG,
1731         KVM_REG_MIPS_CP0_CONFIG1,
1732         KVM_REG_MIPS_CP0_CONFIG2,
1733         KVM_REG_MIPS_CP0_CONFIG3,
1734         KVM_REG_MIPS_CP0_CONFIG4,
1735         KVM_REG_MIPS_CP0_CONFIG5,
1736         KVM_REG_MIPS_CP0_CONFIG6,
1737 #ifdef CONFIG_64BIT
1738         KVM_REG_MIPS_CP0_XCONTEXT,
1739 #endif
1740         KVM_REG_MIPS_CP0_ERROREPC,
1741
1742         KVM_REG_MIPS_COUNT_CTL,
1743         KVM_REG_MIPS_COUNT_RESUME,
1744         KVM_REG_MIPS_COUNT_HZ,
1745 };
1746
1747 static u64 kvm_vz_get_one_regs_contextconfig[] = {
1748         KVM_REG_MIPS_CP0_CONTEXTCONFIG,
1749 #ifdef CONFIG_64BIT
1750         KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
1751 #endif
1752 };
1753
1754 static u64 kvm_vz_get_one_regs_segments[] = {
1755         KVM_REG_MIPS_CP0_SEGCTL0,
1756         KVM_REG_MIPS_CP0_SEGCTL1,
1757         KVM_REG_MIPS_CP0_SEGCTL2,
1758 };
1759
1760 static u64 kvm_vz_get_one_regs_htw[] = {
1761         KVM_REG_MIPS_CP0_PWBASE,
1762         KVM_REG_MIPS_CP0_PWFIELD,
1763         KVM_REG_MIPS_CP0_PWSIZE,
1764         KVM_REG_MIPS_CP0_PWCTL,
1765 };
1766
1767 static u64 kvm_vz_get_one_regs_kscratch[] = {
1768         KVM_REG_MIPS_CP0_KSCRATCH1,
1769         KVM_REG_MIPS_CP0_KSCRATCH2,
1770         KVM_REG_MIPS_CP0_KSCRATCH3,
1771         KVM_REG_MIPS_CP0_KSCRATCH4,
1772         KVM_REG_MIPS_CP0_KSCRATCH5,
1773         KVM_REG_MIPS_CP0_KSCRATCH6,
1774 };
1775
1776 static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
1777 {
1778         unsigned long ret;
1779
1780         ret = ARRAY_SIZE(kvm_vz_get_one_regs);
1781         if (cpu_guest_has_userlocal)
1782                 ++ret;
1783         if (cpu_guest_has_badinstr)
1784                 ++ret;
1785         if (cpu_guest_has_badinstrp)
1786                 ++ret;
1787         if (cpu_guest_has_contextconfig)
1788                 ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1789         if (cpu_guest_has_segments)
1790                 ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1791         if (cpu_guest_has_htw || cpu_guest_has_ldpte)
1792                 ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1793         if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
1794                 ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
1795         ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
1796
1797         return ret;
1798 }
1799
1800 static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
1801 {
1802         u64 index;
1803         unsigned int i;
1804
1805         if (copy_to_user(indices, kvm_vz_get_one_regs,
1806                          sizeof(kvm_vz_get_one_regs)))
1807                 return -EFAULT;
1808         indices += ARRAY_SIZE(kvm_vz_get_one_regs);
1809
1810         if (cpu_guest_has_userlocal) {
1811                 index = KVM_REG_MIPS_CP0_USERLOCAL;
1812                 if (copy_to_user(indices, &index, sizeof(index)))
1813                         return -EFAULT;
1814                 ++indices;
1815         }
1816         if (cpu_guest_has_badinstr) {
1817                 index = KVM_REG_MIPS_CP0_BADINSTR;
1818                 if (copy_to_user(indices, &index, sizeof(index)))
1819                         return -EFAULT;
1820                 ++indices;
1821         }
1822         if (cpu_guest_has_badinstrp) {
1823                 index = KVM_REG_MIPS_CP0_BADINSTRP;
1824                 if (copy_to_user(indices, &index, sizeof(index)))
1825                         return -EFAULT;
1826                 ++indices;
1827         }
1828         if (cpu_guest_has_contextconfig) {
1829                 if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
1830                                  sizeof(kvm_vz_get_one_regs_contextconfig)))
1831                         return -EFAULT;
1832                 indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1833         }
1834         if (cpu_guest_has_segments) {
1835                 if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
1836                                  sizeof(kvm_vz_get_one_regs_segments)))
1837                         return -EFAULT;
1838                 indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1839         }
1840         if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
1841                 if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
1842                                  sizeof(kvm_vz_get_one_regs_htw)))
1843                         return -EFAULT;
1844                 indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1845         }
1846         if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
1847                 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
1848                         index = KVM_REG_MIPS_CP0_MAAR(i);
1849                         if (copy_to_user(indices, &index, sizeof(index)))
1850                                 return -EFAULT;
1851                         ++indices;
1852                 }
1853
1854                 index = KVM_REG_MIPS_CP0_MAARI;
1855                 if (copy_to_user(indices, &index, sizeof(index)))
1856                         return -EFAULT;
1857                 ++indices;
1858         }
1859         for (i = 0; i < 6; ++i) {
1860                 if (!cpu_guest_has_kscr(i + 2))
1861                         continue;
1862
1863                 if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
1864                                  sizeof(kvm_vz_get_one_regs_kscratch[i])))
1865                         return -EFAULT;
1866                 ++indices;
1867         }
1868
1869         return 0;
1870 }
1871
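/*
 * Illustrative userspace sketch (added for exposition; this is the
 * generic KVM_GET_REG_LIST ABI, not code from this file):
 * kvm_vz_num_regs() sizes the index list and kvm_vz_copy_reg_indices()
 * fills it. Userspace typically calls the ioctl twice: once with n = 0
 * to learn the count (the kernel fails with E2BIG after writing the
 * required n back), then again with a buffer of that size.
 */
#if 0	/* userspace, shown for context only */
struct kvm_reg_list probe = { .n = 0 }, *list;

if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) < 0 && errno == E2BIG) {
	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;
	if (!ioctl(vcpu_fd, KVM_GET_REG_LIST, list))
		/* list->reg[0..n-1] now holds the one-reg ids */;
}
#endif
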
1872 static inline s64 entrylo_kvm_to_user(unsigned long v)
1873 {
1874         s64 mask, ret = v;
1875
1876         if (BITS_PER_LONG == 32) {
1877                 /*
1878                  * KVM API exposes 64-bit version of the register, so move the
1879                  * RI/XI bits up into place.
1880                  */
1881                 mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1882                 ret &= ~mask;
1883                 ret |= ((s64)v & mask) << 32;
1884         }
1885         return ret;
1886 }
1887
1888 static inline unsigned long entrylo_user_to_kvm(s64 v)
1889 {
1890         unsigned long mask, ret = v;
1891
1892         if (BITS_PER_LONG == 32) {
1893                 /*
1894                  * KVM API exposes 64-bit version of the register, so move the
1895                  * RI/XI bits down into place.
1896                  */
1897                 mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1898                 ret &= ~mask;
1899                 ret |= (v >> 32) & mask;
1900         }
1901         return ret;
1902 }
1903
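/*
 * Worked example (added commentary): MIPS_ENTRYLO_RI and
 * MIPS_ENTRYLO_XI sit in the top two bits of the native register, so
 * with BITS_PER_LONG == 32 they are bits 31 and 30. For a hardware
 * value v = 0xC0000FD6 (RI=1, XI=1), entrylo_kvm_to_user() yields
 * 0xC000000000000FD6: RI/XI move up to bits 63/62 of the 64-bit user
 * view while the PFN and flag bits are preserved, and
 * entrylo_user_to_kvm() inverts the transformation. On 64-bit kernels
 * both helpers are identity functions.
 */
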
1904 static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
1905                               const struct kvm_one_reg *reg,
1906                               s64 *v)
1907 {
1908         struct mips_coproc *cop0 = vcpu->arch.cop0;
1909         unsigned int idx;
1910
1911         switch (reg->id) {
1912         case KVM_REG_MIPS_CP0_INDEX:
1913                 *v = (long)read_gc0_index();
1914                 break;
1915         case KVM_REG_MIPS_CP0_ENTRYLO0:
1916                 *v = entrylo_kvm_to_user(read_gc0_entrylo0());
1917                 break;
1918         case KVM_REG_MIPS_CP0_ENTRYLO1:
1919                 *v = entrylo_kvm_to_user(read_gc0_entrylo1());
1920                 break;
1921         case KVM_REG_MIPS_CP0_CONTEXT:
1922                 *v = (long)read_gc0_context();
1923                 break;
1924         case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
1925                 if (!cpu_guest_has_contextconfig)
1926                         return -EINVAL;
1927                 *v = read_gc0_contextconfig();
1928                 break;
1929         case KVM_REG_MIPS_CP0_USERLOCAL:
1930                 if (!cpu_guest_has_userlocal)
1931                         return -EINVAL;
1932                 *v = read_gc0_userlocal();
1933                 break;
1934 #ifdef CONFIG_64BIT
1935         case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
1936                 if (!cpu_guest_has_contextconfig)
1937                         return -EINVAL;
1938                 *v = read_gc0_xcontextconfig();
1939                 break;
1940 #endif
1941         case KVM_REG_MIPS_CP0_PAGEMASK:
1942                 *v = (long)read_gc0_pagemask();
1943                 break;
1944         case KVM_REG_MIPS_CP0_PAGEGRAIN:
1945                 *v = (long)read_gc0_pagegrain();
1946                 break;
1947         case KVM_REG_MIPS_CP0_SEGCTL0:
1948                 if (!cpu_guest_has_segments)
1949                         return -EINVAL;
1950                 *v = read_gc0_segctl0();
1951                 break;
1952         case KVM_REG_MIPS_CP0_SEGCTL1:
1953                 if (!cpu_guest_has_segments)
1954                         return -EINVAL;
1955                 *v = read_gc0_segctl1();
1956                 break;
1957         case KVM_REG_MIPS_CP0_SEGCTL2:
1958                 if (!cpu_guest_has_segments)
1959                         return -EINVAL;
1960                 *v = read_gc0_segctl2();
1961                 break;
1962         case KVM_REG_MIPS_CP0_PWBASE:
1963                 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1964                         return -EINVAL;
1965                 *v = read_gc0_pwbase();
1966                 break;
1967         case KVM_REG_MIPS_CP0_PWFIELD:
1968                 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1969                         return -EINVAL;
1970                 *v = read_gc0_pwfield();
1971                 break;
1972         case KVM_REG_MIPS_CP0_PWSIZE:
1973                 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1974                         return -EINVAL;
1975                 *v = read_gc0_pwsize();
1976                 break;
1977         case KVM_REG_MIPS_CP0_WIRED:
1978                 *v = (long)read_gc0_wired();
1979                 break;
1980         case KVM_REG_MIPS_CP0_PWCTL:
1981                 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1982                         return -EINVAL;
1983                 *v = read_gc0_pwctl();
1984                 break;
1985         case KVM_REG_MIPS_CP0_HWRENA:
1986                 *v = (long)read_gc0_hwrena();
1987                 break;
1988         case KVM_REG_MIPS_CP0_BADVADDR:
1989                 *v = (long)read_gc0_badvaddr();
1990                 break;
1991         case KVM_REG_MIPS_CP0_BADINSTR:
1992                 if (!cpu_guest_has_badinstr)
1993                         return -EINVAL;
1994                 *v = read_gc0_badinstr();
1995                 break;
1996         case KVM_REG_MIPS_CP0_BADINSTRP:
1997                 if (!cpu_guest_has_badinstrp)
1998                         return -EINVAL;
1999                 *v = read_gc0_badinstrp();
2000                 break;
2001         case KVM_REG_MIPS_CP0_COUNT:
2002                 *v = kvm_mips_read_count(vcpu);
2003                 break;
2004         case KVM_REG_MIPS_CP0_ENTRYHI:
2005                 *v = (long)read_gc0_entryhi();
2006                 break;
2007         case KVM_REG_MIPS_CP0_COMPARE:
2008                 *v = (long)read_gc0_compare();
2009                 break;
2010         case KVM_REG_MIPS_CP0_STATUS:
2011                 *v = (long)read_gc0_status();
2012                 break;
2013         case KVM_REG_MIPS_CP0_INTCTL:
2014                 *v = read_gc0_intctl();
2015                 break;
2016         case KVM_REG_MIPS_CP0_CAUSE:
2017                 *v = (long)read_gc0_cause();
2018                 break;
2019         case KVM_REG_MIPS_CP0_EPC:
2020                 *v = (long)read_gc0_epc();
2021                 break;
2022         case KVM_REG_MIPS_CP0_PRID:
2023                 switch (boot_cpu_type()) {
2024                 case CPU_CAVIUM_OCTEON3:
2025                         /* Octeon III has a read-only guest.PRid */
2026                         *v = read_gc0_prid();
2027                         break;
2028                 default:
2029                         *v = (long)kvm_read_c0_guest_prid(cop0);
2030                         break;
2031                 }
2032                 break;
2033         case KVM_REG_MIPS_CP0_EBASE:
2034                 *v = kvm_vz_read_gc0_ebase();
2035                 break;
2036         case KVM_REG_MIPS_CP0_CONFIG:
2037                 *v = read_gc0_config();
2038                 break;
2039         case KVM_REG_MIPS_CP0_CONFIG1:
2040                 if (!cpu_guest_has_conf1)
2041                         return -EINVAL;
2042                 *v = read_gc0_config1();
2043                 break;
2044         case KVM_REG_MIPS_CP0_CONFIG2:
2045                 if (!cpu_guest_has_conf2)
2046                         return -EINVAL;
2047                 *v = read_gc0_config2();
2048                 break;
2049         case KVM_REG_MIPS_CP0_CONFIG3:
2050                 if (!cpu_guest_has_conf3)
2051                         return -EINVAL;
2052                 *v = read_gc0_config3();
2053                 break;
2054         case KVM_REG_MIPS_CP0_CONFIG4:
2055                 if (!cpu_guest_has_conf4)
2056                         return -EINVAL;
2057                 *v = read_gc0_config4();
2058                 break;
2059         case KVM_REG_MIPS_CP0_CONFIG5:
2060                 if (!cpu_guest_has_conf5)
2061                         return -EINVAL;
2062                 *v = read_gc0_config5();
2063                 break;
2064         case KVM_REG_MIPS_CP0_CONFIG6:
2065                 *v = kvm_read_sw_gc0_config6(cop0);
2066                 break;
2067         case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
2068                 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2069                         return -EINVAL;
2070                 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
2071                 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2072                         return -EINVAL;
2073                 *v = vcpu->arch.maar[idx];
2074                 break;
2075         case KVM_REG_MIPS_CP0_MAARI:
2076                 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2077                         return -EINVAL;
2078                 *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
2079                 break;
2080 #ifdef CONFIG_64BIT
2081         case KVM_REG_MIPS_CP0_XCONTEXT:
2082                 *v = read_gc0_xcontext();
2083                 break;
2084 #endif
2085         case KVM_REG_MIPS_CP0_ERROREPC:
2086                 *v = (long)read_gc0_errorepc();
2087                 break;
2088         case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
2089                 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
2090                 if (!cpu_guest_has_kscr(idx))
2091                         return -EINVAL;
2092                 switch (idx) {
2093                 case 2:
2094                         *v = (long)read_gc0_kscratch1();
2095                         break;
2096                 case 3:
2097                         *v = (long)read_gc0_kscratch2();
2098                         break;
2099                 case 4:
2100                         *v = (long)read_gc0_kscratch3();
2101                         break;
2102                 case 5:
2103                         *v = (long)read_gc0_kscratch4();
2104                         break;
2105                 case 6:
2106                         *v = (long)read_gc0_kscratch5();
2107                         break;
2108                 case 7:
2109                         *v = (long)read_gc0_kscratch6();
2110                         break;
2111                 }
2112                 break;
2113         case KVM_REG_MIPS_COUNT_CTL:
2114                 *v = vcpu->arch.count_ctl;
2115                 break;
2116         case KVM_REG_MIPS_COUNT_RESUME:
2117                 *v = ktime_to_ns(vcpu->arch.count_resume);
2118                 break;
2119         case KVM_REG_MIPS_COUNT_HZ:
2120                 *v = vcpu->arch.count_hz;
2121                 break;
2122         default:
2123                 return -EINVAL;
2124         }
2125         return 0;
2126 }
2127
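/*
 * Illustrative userspace sketch (added for exposition; generic KVM
 * one-reg ABI, not code from this file): kvm_vz_get_one_reg() is
 * reached through the KVM_GET_ONE_REG vcpu ioctl, which carries a
 * register id plus a user pointer that receives the 64-bit value.
 */
#if 0	/* userspace, shown for context only */
static int get_guest_cp0_status(int vcpu_fd, __u64 *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_CP0_STATUS, /* -> read_gc0_status() */
		.addr = (__u64)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
#endif
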
2128 static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
2129                               const struct kvm_one_reg *reg,
2130                               s64 v)
2131 {
2132         struct mips_coproc *cop0 = vcpu->arch.cop0;
2133         unsigned int idx;
2134         int ret = 0;
2135         unsigned int cur, change;
2136
2137         switch (reg->id) {
2138         case KVM_REG_MIPS_CP0_INDEX:
2139                 write_gc0_index(v);
2140                 break;
2141         case KVM_REG_MIPS_CP0_ENTRYLO0:
2142                 write_gc0_entrylo0(entrylo_user_to_kvm(v));
2143                 break;
2144         case KVM_REG_MIPS_CP0_ENTRYLO1:
2145                 write_gc0_entrylo1(entrylo_user_to_kvm(v));
2146                 break;
2147         case KVM_REG_MIPS_CP0_CONTEXT:
2148                 write_gc0_context(v);
2149                 break;
2150         case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
2151                 if (!cpu_guest_has_contextconfig)
2152                         return -EINVAL;
2153                 write_gc0_contextconfig(v);
2154                 break;
2155         case KVM_REG_MIPS_CP0_USERLOCAL:
2156                 if (!cpu_guest_has_userlocal)
2157                         return -EINVAL;
2158                 write_gc0_userlocal(v);
2159                 break;
2160 #ifdef CONFIG_64BIT
2161         case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
2162                 if (!cpu_guest_has_contextconfig)
2163                         return -EINVAL;
2164                 write_gc0_xcontextconfig(v);
2165                 break;
2166 #endif
2167         case KVM_REG_MIPS_CP0_PAGEMASK:
2168                 write_gc0_pagemask(v);
2169                 break;
2170         case KVM_REG_MIPS_CP0_PAGEGRAIN:
2171                 write_gc0_pagegrain(v);
2172                 break;
2173         case KVM_REG_MIPS_CP0_SEGCTL0:
2174                 if (!cpu_guest_has_segments)
2175                         return -EINVAL;
2176                 write_gc0_segctl0(v);
2177                 break;
2178         case KVM_REG_MIPS_CP0_SEGCTL1:
2179                 if (!cpu_guest_has_segments)
2180                         return -EINVAL;
2181                 write_gc0_segctl1(v);
2182                 break;
2183         case KVM_REG_MIPS_CP0_SEGCTL2:
2184                 if (!cpu_guest_has_segments)
2185                         return -EINVAL;
2186                 write_gc0_segctl2(v);
2187                 break;
2188         case KVM_REG_MIPS_CP0_PWBASE:
2189                 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2190                         return -EINVAL;
2191                 write_gc0_pwbase(v);
2192                 break;
2193         case KVM_REG_MIPS_CP0_PWFIELD:
2194                 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2195                         return -EINVAL;
2196                 write_gc0_pwfield(v);
2197                 break;
2198         case KVM_REG_MIPS_CP0_PWSIZE:
2199                 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2200                         return -EINVAL;
2201                 write_gc0_pwsize(v);
2202                 break;
2203         case KVM_REG_MIPS_CP0_WIRED:
2204                 change_gc0_wired(MIPSR6_WIRED_WIRED, v);
2205                 break;
2206         case KVM_REG_MIPS_CP0_PWCTL:
2207                 if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2208                         return -EINVAL;
2209                 write_gc0_pwctl(v);
2210                 break;
2211         case KVM_REG_MIPS_CP0_HWRENA:
2212                 write_gc0_hwrena(v);
2213                 break;
2214         case KVM_REG_MIPS_CP0_BADVADDR:
2215                 write_gc0_badvaddr(v);
2216                 break;
2217         case KVM_REG_MIPS_CP0_BADINSTR:
2218                 if (!cpu_guest_has_badinstr)
2219                         return -EINVAL;
2220                 write_gc0_badinstr(v);
2221                 break;
2222         case KVM_REG_MIPS_CP0_BADINSTRP:
2223                 if (!cpu_guest_has_badinstrp)
2224                         return -EINVAL;
2225                 write_gc0_badinstrp(v);
2226                 break;
2227         case KVM_REG_MIPS_CP0_COUNT:
2228                 kvm_mips_write_count(vcpu, v);
2229                 break;
2230         case KVM_REG_MIPS_CP0_ENTRYHI:
2231                 write_gc0_entryhi(v);
2232                 break;
2233         case KVM_REG_MIPS_CP0_COMPARE:
2234                 kvm_mips_write_compare(vcpu, v, false);
2235                 break;
2236         case KVM_REG_MIPS_CP0_STATUS:
2237                 write_gc0_status(v);
2238                 break;
2239         case KVM_REG_MIPS_CP0_INTCTL:
2240                 write_gc0_intctl(v);
2241                 break;
2242         case KVM_REG_MIPS_CP0_CAUSE:
2243                 /*
2244                  * If the timer is stopped or started (DC bit) it must look
2245                  * atomic with changes to the timer interrupt pending bit (TI).
2246                  * A timer interrupt should not happen in between.
2247                  */
2248                 if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
2249                         if (v & CAUSEF_DC) {
2250                                 /* disable timer first */
2251                                 kvm_mips_count_disable_cause(vcpu);
2252                                 change_gc0_cause((u32)~CAUSEF_DC, v);
2253                         } else {
2254                                 /* enable timer last */
2255                                 change_gc0_cause((u32)~CAUSEF_DC, v);
2256                                 kvm_mips_count_enable_cause(vcpu);
2257                         }
2258                 } else {
2259                         write_gc0_cause(v);
2260                 }
2261                 break;
2262         case KVM_REG_MIPS_CP0_EPC:
2263                 write_gc0_epc(v);
2264                 break;
2265         case KVM_REG_MIPS_CP0_PRID:
2266                 switch (boot_cpu_type()) {
2267                 case CPU_CAVIUM_OCTEON3:
2268                         /* Octeon III has a guest.PRid, but it's read-only */
2269                         break;
2270                 default:
2271                         kvm_write_c0_guest_prid(cop0, v);
2272                         break;
2273                 }
2274                 break;
2275         case KVM_REG_MIPS_CP0_EBASE:
2276                 kvm_vz_write_gc0_ebase(v);
2277                 break;
2278         case KVM_REG_MIPS_CP0_CONFIG:
2279                 cur = read_gc0_config();
2280                 change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
2281                 if (change) {
2282                         v = cur ^ change;
2283                         write_gc0_config(v);
2284                 }
2285                 break;
2286         case KVM_REG_MIPS_CP0_CONFIG1:
2287                 if (!cpu_guest_has_conf1)
2288                         break;
2289                 cur = read_gc0_config1();
2290                 change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
2291                 if (change) {
2292                         v = cur ^ change;
2293                         write_gc0_config1(v);
2294                 }
2295                 break;
2296         case KVM_REG_MIPS_CP0_CONFIG2:
2297                 if (!cpu_guest_has_conf2)
2298                         break;
2299                 cur = read_gc0_config2();
2300                 change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
2301                 if (change) {
2302                         v = cur ^ change;
2303                         write_gc0_config2(v);
2304                 }
2305                 break;
2306         case KVM_REG_MIPS_CP0_CONFIG3:
2307                 if (!cpu_guest_has_conf3)
2308                         break;
2309                 cur = read_gc0_config3();
2310                 change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
2311                 if (change) {
2312                         v = cur ^ change;
2313                         write_gc0_config3(v);
2314                 }
2315                 break;
2316         case KVM_REG_MIPS_CP0_CONFIG4:
2317                 if (!cpu_guest_has_conf4)
2318                         break;
2319                 cur = read_gc0_config4();
2320                 change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
2321                 if (change) {
2322                         v = cur ^ change;
2323                         write_gc0_config4(v);
2324                 }
2325                 break;
2326         case KVM_REG_MIPS_CP0_CONFIG5:
2327                 if (!cpu_guest_has_conf5)
2328                         break;
2329                 cur = read_gc0_config5();
2330                 change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
2331                 if (change) {
2332                         v = cur ^ change;
2333                         write_gc0_config5(v);
2334                 }
2335                 break;
2336         case KVM_REG_MIPS_CP0_CONFIG6:
2337                 cur = kvm_read_sw_gc0_config6(cop0);
2338                 change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
2339                 if (change) {
2340                         v = cur ^ change;
2341                         kvm_write_sw_gc0_config6(cop0, (int)v);
2342                 }
2343                 break;
2344         case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
2345                 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2346                         return -EINVAL;
2347                 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
2348                 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2349                         return -EINVAL;
2350                 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
2351                 break;
2352         case KVM_REG_MIPS_CP0_MAARI:
2353                 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2354                         return -EINVAL;
2355                 kvm_write_maari(vcpu, v);
2356                 break;
2357 #ifdef CONFIG_64BIT
2358         case KVM_REG_MIPS_CP0_XCONTEXT:
2359                 write_gc0_xcontext(v);
2360                 break;
2361 #endif
2362         case KVM_REG_MIPS_CP0_ERROREPC:
2363                 write_gc0_errorepc(v);
2364                 break;
2365         case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
2366                 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
2367                 if (!cpu_guest_has_kscr(idx))
2368                         return -EINVAL;
2369                 switch (idx) {
2370                 case 2:
2371                         write_gc0_kscratch1(v);
2372                         break;
2373                 case 3:
2374                         write_gc0_kscratch2(v);
2375                         break;
2376                 case 4:
2377                         write_gc0_kscratch3(v);
2378                         break;
2379                 case 5:
2380                         write_gc0_kscratch4(v);
2381                         break;
2382                 case 6:
2383                         write_gc0_kscratch5(v);
2384                         break;
2385                 case 7:
2386                         write_gc0_kscratch6(v);
2387                         break;
2388                 }
2389                 break;
2390         case KVM_REG_MIPS_COUNT_CTL:
2391                 ret = kvm_mips_set_count_ctl(vcpu, v);
2392                 break;
2393         case KVM_REG_MIPS_COUNT_RESUME:
2394                 ret = kvm_mips_set_count_resume(vcpu, v);
2395                 break;
2396         case KVM_REG_MIPS_COUNT_HZ:
2397                 ret = kvm_mips_set_count_hz(vcpu, v);
2398                 break;
2399         default:
2400                 return -EINVAL;
2401         }
2402         return ret;
2403 }
2404
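/*
 * Worked example (added commentary) of the Config read-modify-write
 * pattern used repeatedly above: with cur = 0b0110, a requested value
 * v = 0b1010 and a write mask of 0b1010, change = (cur ^ v) & mask =
 * 0b1100 & 0b1010 = 0b1000, so the final value cur ^ change = 0b1110.
 * Bit 3 flips to the requested 1, while the requested clearing of
 * bit 2 is ignored because bit 2 is outside the mask; this is how
 * read-only Config fields silently survive user writes.
 */
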
2405 #define guestid_cache(cpu)      (cpu_data[cpu].guestid_cache)
2406 static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
2407 {
2408         unsigned long guestid = guestid_cache(cpu);
2409
2410         if (!(++guestid & GUESTID_MASK)) {
2411                 if (cpu_has_vtag_icache)
2412                         flush_icache_all();
2413
2414                 if (!guestid)           /* fix version if needed */
2415                         guestid = GUESTID_FIRST_VERSION;
2416
2417                 ++guestid;              /* guestid 0 reserved for root */
2418
2419                 /* start new guestid cycle */
2420                 kvm_vz_local_flush_roottlb_all_guests();
2421                 kvm_vz_local_flush_guesttlb_all();
2422         }
2423
2424         guestid_cache(cpu) = guestid;
2425 }
2426
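/*
 * Added commentary: this mirrors the classic MIPS ASID allocator. The
 * low GUESTID_MASK bits of guestid_cache(cpu) are the hardware GuestID
 * and the bits above act as a generation ("version") number.
 * Incrementing past the last GuestID wraps the low bits to zero, which
 * starts a new generation: all TLB contents tagged with older
 * generations are flushed, and GuestID 0 is skipped because it is
 * reserved for root. A VCPU whose stored vzguestid belongs to an older
 * generation (checked in kvm_vz_vcpu_load_tlb() below) simply
 * allocates a fresh one.
 */
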
2427 /* Returns 1 if the guest TLB may be clobbered */
2428 static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
2429 {
2430         int ret = 0;
2431         int i;
2432
2433         if (!kvm_request_pending(vcpu))
2434                 return 0;
2435
2436         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2437                 if (cpu_has_guestid) {
2438                         /* Drop all GuestIDs for this VCPU */
2439                         for_each_possible_cpu(i)
2440                                 vcpu->arch.vzguestid[i] = 0;
2441                         /* This will clobber guest TLB contents too */
2442                         ret = 1;
2443                 }
2444                 /*
2445                  * For Root ASID Dealias (RAD) we don't do anything here, but we
2446                  * still need the request to ensure we recheck asid_flush_mask.
2447                  * We can still return 0 as only the root TLB will be affected
2448                  * by a root ASID flush.
2449                  */
2450         }
2451
2452         return ret;
2453 }
2454
2455 static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
2456 {
2457         unsigned int wired = read_gc0_wired();
2458         struct kvm_mips_tlb *tlbs;
2459         int i;
2460
2461         /* Expand the wired TLB array if necessary */
2462         wired &= MIPSR6_WIRED_WIRED;
2463         if (wired > vcpu->arch.wired_tlb_limit) {
2464                 tlbs = krealloc(vcpu->arch.wired_tlb, wired *
2465                                 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
2466                 if (WARN_ON(!tlbs)) {
2467                         /* Save whatever we can */
2468                         wired = vcpu->arch.wired_tlb_limit;
2469                 } else {
2470                         vcpu->arch.wired_tlb = tlbs;
2471                         vcpu->arch.wired_tlb_limit = wired;
2472                 }
2473         }
2474
2475         if (wired)
2476                 /* Save wired entries from the guest TLB */
2477                 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
2478         /* Invalidate any dropped entries since last time */
2479         for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
2480                 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
2481                 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
2482                 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
2483                 vcpu->arch.wired_tlb[i].tlb_mask = 0;
2484         }
2485         vcpu->arch.wired_tlb_used = wired;
2486 }
2487
2488 static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
2489 {
2490         /* Load wired entries into the guest TLB */
2491         if (vcpu->arch.wired_tlb)
2492                 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
2493                                      vcpu->arch.wired_tlb_used);
2494 }
2495
2496 static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
2497 {
2498         struct kvm *kvm = vcpu->kvm;
2499         struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
2500         bool migrated;
2501
2502         /*
2503          * Are we entering guest context on a different CPU to last time?
2504          * If so, the VCPU's guest TLB state on this CPU may be stale.
2505          */
2506         migrated = (vcpu->arch.last_exec_cpu != cpu);
2507         vcpu->arch.last_exec_cpu = cpu;
2508
2509         /*
2510          * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
2511          * remains set until another vcpu is loaded in.  As a rule GuestRID
2512          * remains zeroed when in root context unless the kernel is busy
2513          * manipulating guest tlb entries.
2514          */
2515         if (cpu_has_guestid) {
2516                 /*
2517                  * Check if our GuestID is of an older version and thus invalid.
2518                  *
2519                  * We also discard the stored GuestID if we've executed on
2520                  * another CPU, as the guest mappings may have changed without
2521                  * hypervisor knowledge.
2522                  */
2523                 if (migrated ||
2524                     (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
2525                                         GUESTID_VERSION_MASK) {
2526                         kvm_vz_get_new_guestid(cpu, vcpu);
2527                         vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
2528                         trace_kvm_guestid_change(vcpu,
2529                                                  vcpu->arch.vzguestid[cpu]);
2530                 }
2531
2532                 /* Restore GuestID */
2533                 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
2534         } else {
2535                 /*
2536                  * The Guest TLB only stores a single guest's TLB state, so
2537                  * flush it if another VCPU has executed on this CPU.
2538                  *
2539                  * We also flush if we've executed on another CPU, as the guest
2540                  * mappings may have changed without hypervisor knowledge.
2541                  */
2542                 if (migrated || last_exec_vcpu[cpu] != vcpu)
2543                         kvm_vz_local_flush_guesttlb_all();
2544                 last_exec_vcpu[cpu] = vcpu;
2545
2546                 /*
2547                  * Root ASID dealiases guest GPA mappings in the root TLB.
2548                  * Allocate new root ASID if needed.
2549                  */
2550                 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
2551                         get_new_mmu_context(gpa_mm);
2552                 else
2553                         check_mmu_context(gpa_mm);
2554         }
2555 }
2556
2557 static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2558 {
2559         struct mips_coproc *cop0 = vcpu->arch.cop0;
2560         bool migrated, all;
2561
2562         /*
2563          * Have we migrated to a different CPU?
2564          * If so, any old guest TLB state may be stale.
2565          */
2566         migrated = (vcpu->arch.last_sched_cpu != cpu);
2567
2568         /*
2569          * Was this the last VCPU to run on this CPU?
2570          * If not, any old guest state from this VCPU will have been clobbered.
2571          */
2572         all = migrated || (last_vcpu[cpu] != vcpu);
2573         last_vcpu[cpu] = vcpu;
2574
2575         /*
2576          * Restore CP0_Wired unconditionally as we clear it after use, and
2577          * restore wired guest TLB entries (while in guest context).
2578          */
2579         kvm_restore_gc0_wired(cop0);
2580         if (current->flags & PF_VCPU) {
2581                 tlbw_use_hazard();
2582                 kvm_vz_vcpu_load_tlb(vcpu, cpu);
2583                 kvm_vz_vcpu_load_wired(vcpu);
2584         }
2585
2586         /*
2587          * Restore timer state regardless, as e.g. Cause.TI can change over time
2588          * if left unmaintained.
2589          */
2590         kvm_vz_restore_timer(vcpu);
2591
2592         /* Set MC bit if we want to trace guest mode changes */
2593         if (kvm_trace_guest_mode_change)
2594                 set_c0_guestctl0(MIPS_GCTL0_MC);
2595         else
2596                 clear_c0_guestctl0(MIPS_GCTL0_MC);
2597
2598         /* Don't bother restoring registers multiple times unless necessary */
2599         if (!all)
2600                 return 0;
2601
2602         /*
2603          * Restore config registers first, as some implementations restrict
2604          * writes to other registers when the corresponding feature bits aren't
2605          * set. For example Status.CU1 cannot be set unless Config1.FP is set.
2606          */
2607         kvm_restore_gc0_config(cop0);
2608         if (cpu_guest_has_conf1)
2609                 kvm_restore_gc0_config1(cop0);
2610         if (cpu_guest_has_conf2)
2611                 kvm_restore_gc0_config2(cop0);
2612         if (cpu_guest_has_conf3)
2613                 kvm_restore_gc0_config3(cop0);
2614         if (cpu_guest_has_conf4)
2615                 kvm_restore_gc0_config4(cop0);
2616         if (cpu_guest_has_conf5)
2617                 kvm_restore_gc0_config5(cop0);
2618         if (cpu_guest_has_conf6)
2619                 kvm_restore_gc0_config6(cop0);
2620         if (cpu_guest_has_conf7)
2621                 kvm_restore_gc0_config7(cop0);
2622
2623         kvm_restore_gc0_index(cop0);
2624         kvm_restore_gc0_entrylo0(cop0);
2625         kvm_restore_gc0_entrylo1(cop0);
2626         kvm_restore_gc0_context(cop0);
2627         if (cpu_guest_has_contextconfig)
2628                 kvm_restore_gc0_contextconfig(cop0);
2629 #ifdef CONFIG_64BIT
2630         kvm_restore_gc0_xcontext(cop0);
2631         if (cpu_guest_has_contextconfig)
2632                 kvm_restore_gc0_xcontextconfig(cop0);
2633 #endif
2634         kvm_restore_gc0_pagemask(cop0);
2635         kvm_restore_gc0_pagegrain(cop0);
2636         kvm_restore_gc0_hwrena(cop0);
2637         kvm_restore_gc0_badvaddr(cop0);
2638         kvm_restore_gc0_entryhi(cop0);
2639         kvm_restore_gc0_status(cop0);
2640         kvm_restore_gc0_intctl(cop0);
2641         kvm_restore_gc0_epc(cop0);
2642         kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
2643         if (cpu_guest_has_userlocal)
2644                 kvm_restore_gc0_userlocal(cop0);
2645
2646         kvm_restore_gc0_errorepc(cop0);
2647
2648         /* restore KScratch registers if enabled in guest */
2649         if (cpu_guest_has_conf4) {
2650                 if (cpu_guest_has_kscr(2))
2651                         kvm_restore_gc0_kscratch1(cop0);
2652                 if (cpu_guest_has_kscr(3))
2653                         kvm_restore_gc0_kscratch2(cop0);
2654                 if (cpu_guest_has_kscr(4))
2655                         kvm_restore_gc0_kscratch3(cop0);
2656                 if (cpu_guest_has_kscr(5))
2657                         kvm_restore_gc0_kscratch4(cop0);
2658                 if (cpu_guest_has_kscr(6))
2659                         kvm_restore_gc0_kscratch5(cop0);
2660                 if (cpu_guest_has_kscr(7))
2661                         kvm_restore_gc0_kscratch6(cop0);
2662         }
2663
2664         if (cpu_guest_has_badinstr)
2665                 kvm_restore_gc0_badinstr(cop0);
2666         if (cpu_guest_has_badinstrp)
2667                 kvm_restore_gc0_badinstrp(cop0);
2668
2669         if (cpu_guest_has_segments) {
2670                 kvm_restore_gc0_segctl0(cop0);
2671                 kvm_restore_gc0_segctl1(cop0);
2672                 kvm_restore_gc0_segctl2(cop0);
2673         }
2674
2675         /* restore HTW registers */
2676         if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
2677                 kvm_restore_gc0_pwbase(cop0);
2678                 kvm_restore_gc0_pwfield(cop0);
2679                 kvm_restore_gc0_pwsize(cop0);
2680                 kvm_restore_gc0_pwctl(cop0);
2681         }
2682
2683         /* restore Root.GuestCtl2 from unused Guest guestctl2 register */
2684         if (cpu_has_guestctl2)
2685                 write_c0_guestctl2(
2686                         cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
2687
2688         /*
2689          * We should clear linked load bit to break interrupted atomics. This
2690          * prevents a SC on the next VCPU from succeeding by matching a LL on
2691          * the previous VCPU.
2692          */
2693         if (vcpu->kvm->created_vcpus > 1)
2694                 write_gc0_lladdr(0);
2695
2696         return 0;
2697 }
2698
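/*
 * Added commentary on the write_gc0_lladdr(0) above: the LL side of an
 * LL/SC pair sets the link (LLbit/LLAddr). If VCPU A is preempted
 * between LL and SC and VCPU B is loaded on the same CPU, B's SC could
 * otherwise succeed by matching the link left behind by A's LL, even
 * though B never completed a valid LL of its own. Zeroing guest LLAddr
 * when switching VCPUs breaks the link, so the stale SC fails and the
 * guest retries its atomic sequence.
 */
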
2699 static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
2700 {
2701         struct mips_coproc *cop0 = vcpu->arch.cop0;
2702
2703         if (current->flags & PF_VCPU)
2704                 kvm_vz_vcpu_save_wired(vcpu);
2705
2706         kvm_lose_fpu(vcpu);
2707
2708         kvm_save_gc0_index(cop0);
2709         kvm_save_gc0_entrylo0(cop0);
2710         kvm_save_gc0_entrylo1(cop0);
2711         kvm_save_gc0_context(cop0);
2712         if (cpu_guest_has_contextconfig)
2713                 kvm_save_gc0_contextconfig(cop0);
2714 #ifdef CONFIG_64BIT
2715         kvm_save_gc0_xcontext(cop0);
2716         if (cpu_guest_has_contextconfig)
2717                 kvm_save_gc0_xcontextconfig(cop0);
2718 #endif
2719         kvm_save_gc0_pagemask(cop0);
2720         kvm_save_gc0_pagegrain(cop0);
2721         kvm_save_gc0_wired(cop0);
2722         /* allow wired TLB entries to be overwritten */
2723         clear_gc0_wired(MIPSR6_WIRED_WIRED);
2724         kvm_save_gc0_hwrena(cop0);
2725         kvm_save_gc0_badvaddr(cop0);
2726         kvm_save_gc0_entryhi(cop0);
2727         kvm_save_gc0_status(cop0);
2728         kvm_save_gc0_intctl(cop0);
2729         kvm_save_gc0_epc(cop0);
2730         kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
2731         if (cpu_guest_has_userlocal)
2732                 kvm_save_gc0_userlocal(cop0);
2733
2734         /* only save implemented config registers */
2735         kvm_save_gc0_config(cop0);
2736         if (cpu_guest_has_conf1)
2737                 kvm_save_gc0_config1(cop0);
2738         if (cpu_guest_has_conf2)
2739                 kvm_save_gc0_config2(cop0);
2740         if (cpu_guest_has_conf3)
2741                 kvm_save_gc0_config3(cop0);
2742         if (cpu_guest_has_conf4)
2743                 kvm_save_gc0_config4(cop0);
2744         if (cpu_guest_has_conf5)
2745                 kvm_save_gc0_config5(cop0);
2746         if (cpu_guest_has_conf6)
2747                 kvm_save_gc0_config6(cop0);
2748         if (cpu_guest_has_conf7)
2749                 kvm_save_gc0_config7(cop0);
2750
2751         kvm_save_gc0_errorepc(cop0);
2752
2753         /* save KScratch registers if enabled in guest */
2754         if (cpu_guest_has_conf4) {
2755                 if (cpu_guest_has_kscr(2))
2756                         kvm_save_gc0_kscratch1(cop0);
2757                 if (cpu_guest_has_kscr(3))
2758                         kvm_save_gc0_kscratch2(cop0);
2759                 if (cpu_guest_has_kscr(4))
2760                         kvm_save_gc0_kscratch3(cop0);
2761                 if (cpu_guest_has_kscr(5))
2762                         kvm_save_gc0_kscratch4(cop0);
2763                 if (cpu_guest_has_kscr(6))
2764                         kvm_save_gc0_kscratch5(cop0);
2765                 if (cpu_guest_has_kscr(7))
2766                         kvm_save_gc0_kscratch6(cop0);
2767         }
2768
2769         if (cpu_guest_has_badinstr)
2770                 kvm_save_gc0_badinstr(cop0);
2771         if (cpu_guest_has_badinstrp)
2772                 kvm_save_gc0_badinstrp(cop0);
2773
2774         if (cpu_guest_has_segments) {
2775                 kvm_save_gc0_segctl0(cop0);
2776                 kvm_save_gc0_segctl1(cop0);
2777                 kvm_save_gc0_segctl2(cop0);
2778         }
2779
2780         /* save HTW registers if enabled in guest */
2781         if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
2782             kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
2783                 kvm_save_gc0_pwbase(cop0);
2784                 kvm_save_gc0_pwfield(cop0);
2785                 kvm_save_gc0_pwsize(cop0);
2786                 kvm_save_gc0_pwctl(cop0);
2787         }
2788
2789         kvm_vz_save_timer(vcpu);
2790
2791         /* stash Root.GuestCtl2 in the VCPU's otherwise-unused GuestCtl2 slot */
2792         if (cpu_has_guestctl2)
2793                 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
2794                         read_c0_guestctl2();
2795
2796         return 0;
2797 }
2798
2799 /**
2800  * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2801  * @size:       Number of guest VTLB entries (0 < @size <= root VTLB entries).
2802  *
2803  * Attempt to resize the guest VTLB by writing guest Config registers. This is
2804  * necessary for cores with a shared root/guest TLB to avoid overlap with wired
2805  * entries in the root VTLB.
2806  *
2807  * Returns:     The resulting guest VTLB size, as read back from the hardware.
2808  */
2809 static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
2810 {
2811         unsigned int config4 = 0, ret = 0, limit;
2812
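        /*
         * Config1.MMUSize is only a 6-bit field, so just the low 6 bits of
         * (size - 1) fit there; any higher bits spill into the Config4 size
         * extension fields below. Illustrative arithmetic (not from the code
         * itself): size = 80 gives Config1.MMUSize = 15 and a Config4
         * extension value of 1, since 79 = (1 << 6) + 15.
         */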
2813         /* Write MMUSize - 1 into guest Config registers */
2814         if (cpu_guest_has_conf1)
2815                 change_gc0_config1(MIPS_CONF1_TLBS,
2816                                    (size - 1) << MIPS_CONF1_TLBS_SHIFT);
2817         if (cpu_guest_has_conf4) {
2818                 config4 = read_gc0_config4();
2819                 if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2820                     MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
2821                         config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
2822                         config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2823                                 MIPS_CONF4_VTLBSIZEEXT_SHIFT;
2824                 } else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2825                            MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
2826                         config4 &= ~MIPS_CONF4_MMUSIZEEXT;
2827                         config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2828                                 MIPS_CONF4_MMUSIZEEXT_SHIFT;
2829                 }
2830                 write_gc0_config4(config4);
2831         }
2832
2833         /*
2834          * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1) unless
2835          * that could exceed Root.Wired.Limit, in which case cap it there.
2836          * Guest.Wired.Wired is cleared in the same write so the write is not
2837          * dropped.
2838          */
2838         if (cpu_has_mips_r6) {
2839                 limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
2840                                                 MIPSR6_WIRED_LIMIT_SHIFT;
2841                 if (size - 1 <= limit)
2842                         limit = 0;
2843                 write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
2844         }
2845
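        /*
         * The hardware may clamp or ignore some of the written size bits, so
         * read the registers back to find the size actually in effect.
         */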
2846         /* Read back MMUSize - 1 */
2847         back_to_back_c0_hazard();
2848         if (cpu_guest_has_conf1)
2849                 ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
2850                                                 MIPS_CONF1_TLBS_SHIFT;
2851         if (config4) {
2852                 if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2853                     MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
2854                         ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
2855                                 MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
2856                                 MIPS_CONF1_TLBS_SIZE;
2857                 else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2858                          MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
2859                         ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
2860                                 MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
2861                                 MIPS_CONF1_TLBS_SIZE;
2862         }
2863         return ret + 1;
2864 }
2865
2866 static int kvm_vz_hardware_enable(void)
2867 {
2868         unsigned int mmu_size, guest_mmu_size, ftlb_size;
2869         u64 guest_cvmctl, cvmvmconfig;
2870
2871         switch (current_cpu_type()) {
2872         case CPU_CAVIUM_OCTEON3:
2873                 /* Set up guest timer/perfcount IRQ lines: timer on IP7, perfcount on IP6 */
2874                 guest_cvmctl = read_gc0_cvmctl();
2875                 guest_cvmctl &= ~CVMCTL_IPTI;
2876                 guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
2877                 guest_cvmctl &= ~CVMCTL_IPPCI;
2878                 guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
2879                 write_gc0_cvmctl(guest_cvmctl);
2880
2881                 cvmvmconfig = read_c0_cvmvmconfig();
2882                 /* No I/O hole translation for the guest (DGHT). */
2883                 cvmvmconfig |= CVMVMCONF_DGHT;
2884                 /* Halve the root MMU size */
2885                 mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
2886                             >> CVMVMCONF_MMUSIZEM1_S) + 1;
2887                 guest_mmu_size = mmu_size / 2;
2888                 mmu_size -= guest_mmu_size;
2889                 cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
2890                 cvmvmconfig |= mmu_size - 1;
2891                 write_c0_cvmvmconfig(cvmvmconfig);
2892
2893                 /* Update our records */
2894                 current_cpu_data.tlbsize = mmu_size;
2895                 current_cpu_data.tlbsizevtlb = mmu_size;
2896                 current_cpu_data.guest.tlbsize = guest_mmu_size;
2897
2898                 /* Flush moved entries in new (guest) context */
2899                 kvm_vz_local_flush_guesttlb_all();
2900                 break;
2901         default:
2902                 /*
2903                  * ImgTec cores tend to use a shared root/guest TLB. To avoid
2904                  * overlap of root wired and guest entries, the guest TLB may
2905                  * need resizing.
2906                  */
2907                 mmu_size = current_cpu_data.tlbsizevtlb;
2908                 ftlb_size = current_cpu_data.tlbsize - mmu_size;
2909
2910                 /* Try switching to maximum guest VTLB size for flush */
2911                 guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
2912                 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2913                 kvm_vz_local_flush_guesttlb_all();
2914
2915                 /*
2916                  * Reduce to make space for root wired entries and at least 2
2917                  * root non-wired entries. This does assume that long-term wired
2918                  * entries won't be added later.
2919                  */
2920                 guest_mmu_size = mmu_size - num_wired_entries() - 2;
2921                 guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
2922                 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2923
2924         /*
2925          * Record the VTLB size, but if another CPU has already recorded
2926          * one, check that it matches, since otherwise we cannot present a
2927          * consistent view to the guest. A mismatch suggests an asymmetric
2928          * number of wired entries across CPUs.
2929          */
2930                 if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
2931                     WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
2932                          "Available guest VTLB size mismatch"))
2933                         return -EINVAL;
2934                 break;
2935         }
2936
2937         /*
2938          * Enable virtualization features granting guest direct control of
2939          * certain features:
2940          * CP0=1:       Guest coprocessor 0 context.
2941          * AT=Guest:    Guest MMU.
2942          * CG=1:        Hit (virtual address) CACHE operations (optional).
2943          * CF=1:        Guest Config registers.
2944          * CGI=1:       Indexed flush CACHE operations (optional).
2945          */
2946         write_c0_guestctl0(MIPS_GCTL0_CP0 |
2947                            (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
2948                            MIPS_GCTL0_CG | MIPS_GCTL0_CF);
2949         if (cpu_has_guestctl0ext) {
2950                 if (current_cpu_type() != CPU_LOONGSON64)
2951                         set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
2952                 else
2953                         clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
2954         }
2955
2956         if (cpu_has_guestid) {
2957                 write_c0_guestctl1(0);
2958                 kvm_vz_local_flush_roottlb_all_guests();
2959
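                /*
                 * GuestIDs are handed out like ASIDs: guestid_cache is a
                 * monotonically increasing counter whose bits above
                 * GUESTID_MASK act as a version number, so a wrap of the
                 * hardware GuestID space can be detected and stale root TLB
                 * mappings flushed.
                 */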
2960                 GUESTID_MASK = current_cpu_data.guestid_mask;
2961                 GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
2962                 GUESTID_VERSION_MASK = ~GUESTID_MASK;
2963
2964                 current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
2965         }
2966
2967         /* clear any pending injected virtual guest interrupts (GuestCtl2.VIP) */
2968         if (cpu_has_guestctl2)
2969                 clear_c0_guestctl2(0x3f << 10);
2970
2971 #ifdef CONFIG_CPU_LOONGSON64
2972         /* Control guest CCA attribute */
2973         if (cpu_has_csr())
2974                 csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
2975 #endif
2976
2977         return 0;
2978 }
2979
2980 static void kvm_vz_hardware_disable(void)
2981 {
2982         u64 cvmvmconfig;
2983         unsigned int mmu_size;
2984
2985         /* Flush any remaining guest TLB entries */
2986         kvm_vz_local_flush_guesttlb_all();
2987
2988         switch (current_cpu_type()) {
2989         case CPU_CAVIUM_OCTEON3:
2990                 /*
2991                  * Allocate the whole TLB to the root. Existing guest TLB entries
2992                  * change ownership to the root TLB; this is safe because they
2993                  * were already flushed above while still in the guest TLB.
2994                  */
2995                 cvmvmconfig = read_c0_cvmvmconfig();
2996                 mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
2997                             >> CVMVMCONF_MMUSIZEM1_S) + 1;
2998                 cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
2999                 cvmvmconfig |= mmu_size - 1;
3000                 write_c0_cvmvmconfig(cvmvmconfig);
3001
3002                 /* Update our records */
3003                 current_cpu_data.tlbsize = mmu_size;
3004                 current_cpu_data.tlbsizevtlb = mmu_size;
3005                 current_cpu_data.guest.tlbsize = 0;
3006
3007                 /* Flush moved entries in new (root) context */
3008                 local_flush_tlb_all();
3009                 break;
3010         }
3011
3012         if (cpu_has_guestid) {
3013                 write_c0_guestctl1(0);
3014                 kvm_vz_local_flush_roottlb_all_guests();
3015         }
3016 }
3017
3018 static int kvm_vz_check_extension(struct kvm *kvm, long ext)
3019 {
3020         int r;
3021
3022         switch (ext) {
3023         case KVM_CAP_MIPS_VZ:
3024                 /* we wouldn't be here unless cpu_has_vz */
3025                 r = 1;
3026                 break;
3027 #ifdef CONFIG_64BIT
3028         case KVM_CAP_MIPS_64BIT:
3029                 /* We support 64-bit registers/operations and addresses */
3030                 r = 2;
3031                 break;
3032 #endif
3033         case KVM_CAP_IOEVENTFD:
3034                 r = 1;
3035                 break;
3036         default:
3037                 r = 0;
3038                 break;
3039         }
3040
3041         return r;
3042 }
3043
3044 static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
3045 {
3046         int i;
3047
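        /* no GuestID has been allocated for this VCPU on any CPU yet */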
3048         for_each_possible_cpu(i)
3049                 vcpu->arch.vzguestid[i] = 0;
3050
3051         return 0;
3052 }
3053
3054 static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
3055 {
3056         int cpu;
3057
3058         /*
3059          * If the VCPU is freed and reused as another VCPU, we don't want the
3060          * matching pointer wrongly hanging around in last_vcpu[] or
3061          * last_exec_vcpu[].
3062          */
3063         for_each_possible_cpu(cpu) {
3064                 if (last_vcpu[cpu] == vcpu)
3065                         last_vcpu[cpu] = NULL;
3066                 if (last_exec_vcpu[cpu] == vcpu)
3067                         last_exec_vcpu[cpu] = NULL;
3068         }
3069 }
3070
3071 static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
3072 {
3073         struct mips_coproc *cop0 = vcpu->arch.cop0;
3074         unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
3075
3076         /*
3077          * Start off the timer at the same frequency as the host timer, but the
3078          * soft timer doesn't handle frequencies greater than 1GHz yet.
3079          */
3080         if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
3081                 count_hz = mips_hpt_frequency;
3082         kvm_mips_init_count(vcpu, count_hz);
3083
3084         /*
3085          * Initialize guest register state to valid architectural reset state.
3086          */
3087
3088         /* PageGrain */
3089         if (cpu_has_mips_r5 || cpu_has_mips_r6)
3090                 kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
3091         /* Wired */
3092         if (cpu_has_mips_r6)
3093                 kvm_write_sw_gc0_wired(cop0,
3094                                        read_gc0_wired() & MIPSR6_WIRED_LIMIT);
3095         /* Status */
3096         kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
3097         if (cpu_has_mips_r5 || cpu_has_mips_r6)
3098                 kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
3099         /* IntCtl: keep only the hardware's IPTI/IPPCI/IPFDC pin assignments */
3100         kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
3101                                 (INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
3102         /* PRId */
3103         kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
3104         /* EBase: reset vector base in KSEG0, with CPUNum = vcpu_id */
3105         kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
3106         /* Config */
3107         kvm_save_gc0_config(cop0);
3108         /* architecturally writable (e.g. from guest) */
3109         kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
3110                                  _page_cachable_default >> _CACHE_SHIFT);
3111         /* architecturally read only, but maybe writable from root */
3112         kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
3113         if (cpu_guest_has_conf1) {
3114                 kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
3115                 /* Config1 */
3116                 kvm_save_gc0_config1(cop0);
3117                 /* architecturally read only, but maybe writable from root */
3118                 kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2    |
3119                                                MIPS_CONF1_MD    |
3120                                                MIPS_CONF1_PC    |
3121                                                MIPS_CONF1_WR    |
3122                                                MIPS_CONF1_CA    |
3123                                                MIPS_CONF1_FP);
3124         }
3125         if (cpu_guest_has_conf2) {
3126                 kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
3127                 /* Config2 */
3128                 kvm_save_gc0_config2(cop0);
3129         }
3130         if (cpu_guest_has_conf3) {
3131                 kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
3132                 /* Config3 */
3133                 kvm_save_gc0_config3(cop0);
3134                 /* architecturally writable (e.g. from guest) */
3135                 kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
3136                 /* architecturally read only, but maybe writable from root */
3137                 kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA   |
3138                                                MIPS_CONF3_BPG   |
3139                                                MIPS_CONF3_ULRI  |
3140                                                MIPS_CONF3_DSP   |
3141                                                MIPS_CONF3_CTXTC |
3142                                                MIPS_CONF3_ITL   |
3143                                                MIPS_CONF3_LPA   |
3144                                                MIPS_CONF3_VEIC  |
3145                                                MIPS_CONF3_VINT  |
3146                                                MIPS_CONF3_SP    |
3147                                                MIPS_CONF3_CDMM  |
3148                                                MIPS_CONF3_MT    |
3149                                                MIPS_CONF3_SM    |
3150                                                MIPS_CONF3_TL);
3151         }
3152         if (cpu_guest_has_conf4) {
3153                 kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
3154                 /* Config4 */
3155                 kvm_save_gc0_config4(cop0);
3156         }
3157         if (cpu_guest_has_conf5) {
3158                 kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
3159                 /* Config5 */
3160                 kvm_save_gc0_config5(cop0);
3161                 /* architecturally writable (e.g. from guest) */
3162                 kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K     |
3163                                                MIPS_CONF5_CV    |
3164                                                MIPS_CONF5_MSAEN |
3165                                                MIPS_CONF5_UFE   |
3166                                                MIPS_CONF5_FRE   |
3167                                                MIPS_CONF5_SBRI  |
3168                                                MIPS_CONF5_UFR);
3169                 /* architecturally read only, but maybe writable from root */
3170                 kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
3171         }
3172
3173         if (cpu_guest_has_contextconfig) {
3174                 /* ContextConfig */
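                /*
                 * 0x007ffff0 sets bits 22:4, matching the fixed BadVPN2 field
                 * layout of the legacy Context register.
                 */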
3175                 kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
3176 #ifdef CONFIG_64BIT
3177                 /* XContextConfig */
3178                 /* bits SEGBITS-13+3:4 set */
3179                 kvm_write_sw_gc0_xcontextconfig(cop0,
3180                                         ((1ull << (cpu_vmbits - 13)) - 1) << 4);
3181 #endif
3182         }
3183
3184         /* Implementation dependent, use the legacy layout */
3185         if (cpu_guest_has_segments) {
3186                 /* SegCtl0, SegCtl1, SegCtl2 */
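                /*
                 * These values correspond to the traditional fixed segment map
                 * (useg/kuseg mapped, kseg0 unmapped cached, kseg1 unmapped
                 * uncached), with kseg0's CCA taken from the default
                 * cacheability set up above.
                 */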
3187                 kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
3188                 kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
3189                                 (_page_cachable_default >> _CACHE_SHIFT) <<
3190                                                 (16 + MIPS_SEGCFG_C_SHIFT));
3191                 kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
3192         }
3193
3194         /* reset HTW registers */
3195         if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
3196                 /* PWField */
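                /*
                 * 0x0c30c302 decodes as GDI = UDI = MDI = PTI = 12, PTEI = 2,
                 * assuming the standard 6-bit PWField subfields.
                 */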
3197                 kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
3198                 /* PWSize */
3199                 kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
3200         }
3201
3202         /* start with no pending virtual guest interrupts */
3203         if (cpu_has_guestctl2)
3204                 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
3205
3206         /* Put PC at reset vector */
3207         vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
3208
3209         return 0;
3210 }
3211
3212 static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
3213 {
3214         if (!cpu_has_guestid) {
3215                 /*
3216                  * For each CPU there is a single GPA ASID used by all VCPUs in
3217                  * the VM, so it doesn't make sense for the VCPUs to handle
3218                  * invalidation of these ASIDs individually.
3219                  *
3220                  * Instead mark all CPUs as needing ASID invalidation in
3221                  * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will
3222                  * kick any running VCPUs so they check asid_flush_mask.
3223                  */
3224                 cpumask_setall(&kvm->arch.asid_flush_mask);
3225         }
3226 }
3227
3228 static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
3229 {
3230         int cpu = smp_processor_id();
3231         int preserve_guest_tlb;
3232
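        /*
         * If kvm_vz_check_requests() dropped this VCPU's GuestIDs (due to a
         * TLB flush request), kvm_vz_vcpu_load_tlb() will allocate a fresh
         * GuestID, losing the guest's wired TLB entries, so save them first
         * and restore them afterwards.
         */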
3233         preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
3234
3235         if (preserve_guest_tlb)
3236                 kvm_vz_vcpu_save_wired(vcpu);
3237
3238         kvm_vz_vcpu_load_tlb(vcpu, cpu);
3239
3240         if (preserve_guest_tlb)
3241                 kvm_vz_vcpu_load_wired(vcpu);
3242 }
3243
3244 static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
3245 {
3246         int cpu = smp_processor_id();
3247         int r;
3248
3249         kvm_vz_acquire_htimer(vcpu);
3250         /* Check if we have any exceptions/interrupts pending */
3251         kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
3252
3253         kvm_vz_check_requests(vcpu, cpu);
3254         kvm_vz_vcpu_load_tlb(vcpu, cpu);
3255         kvm_vz_vcpu_load_wired(vcpu);
3256
3257         r = vcpu->arch.vcpu_run(vcpu);
3258
3259         kvm_vz_vcpu_save_wired(vcpu);
3260
3261         return r;
3262 }
3263
3264 static struct kvm_mips_callbacks kvm_vz_callbacks = {
3265         .handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
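        /* TLB modified faults are deliberately handled like TLB store misses */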
3266         .handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
3267         .handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
3268         .handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
3269         .handle_addr_err_st = kvm_trap_vz_no_handler,
3270         .handle_addr_err_ld = kvm_trap_vz_no_handler,
3271         .handle_syscall = kvm_trap_vz_no_handler,
3272         .handle_res_inst = kvm_trap_vz_no_handler,
3273         .handle_break = kvm_trap_vz_no_handler,
3274         .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
3275         .handle_guest_exit = kvm_trap_vz_handle_guest_exit,
3276
3277         .hardware_enable = kvm_vz_hardware_enable,
3278         .hardware_disable = kvm_vz_hardware_disable,
3279         .check_extension = kvm_vz_check_extension,
3280         .vcpu_init = kvm_vz_vcpu_init,
3281         .vcpu_uninit = kvm_vz_vcpu_uninit,
3282         .vcpu_setup = kvm_vz_vcpu_setup,
3283         .prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
3284         .gva_to_gpa = kvm_vz_gva_to_gpa_cb,
3285         .queue_timer_int = kvm_vz_queue_timer_int_cb,
3286         .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
3287         .queue_io_int = kvm_vz_queue_io_int_cb,
3288         .dequeue_io_int = kvm_vz_dequeue_io_int_cb,
3289         .irq_deliver = kvm_vz_irq_deliver_cb,
3290         .irq_clear = kvm_vz_irq_clear_cb,
3291         .num_regs = kvm_vz_num_regs,
3292         .copy_reg_indices = kvm_vz_copy_reg_indices,
3293         .get_one_reg = kvm_vz_get_one_reg,
3294         .set_one_reg = kvm_vz_set_one_reg,
3295         .vcpu_load = kvm_vz_vcpu_load,
3296         .vcpu_put = kvm_vz_vcpu_put,
3297         .vcpu_run = kvm_vz_vcpu_run,
3298         .vcpu_reenter = kvm_vz_vcpu_reenter,
3299 };
3300
3301 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
3302 {
3303         if (!cpu_has_vz)
3304                 return -ENODEV;
3305
3306         /*
3307          * VZ requires at least 2 KScratch registers, so it should have been
3308          * possible to allocate pgd_reg.
3309          */
3310         if (WARN(pgd_reg == -1,
3311                  "pgd_reg not allocated even though cpu_has_vz\n"))
3312                 return -ENODEV;
3313
3314         pr_info("Starting KVM with MIPS VZ extensions\n");
3315
3316         *install_callbacks = &kvm_vz_callbacks;
3317         return 0;
3318 }