KVM: PPC: Move kvm_guest_enter call into generic code
arch/powerpc/kvm/booke.c (linux-2.6-microblaze.git)
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
14  *
15  * Copyright IBM Corp. 2007
16  * Copyright 2010-2011 Freescale Semiconductor, Inc.
17  *
18  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
20  *          Scott Wood <scottwood@freescale.com>
21  *          Varun Sethi <varun.sethi@freescale.com>
22  */
23
24 #include <linux/errno.h>
25 #include <linux/err.h>
26 #include <linux/kvm_host.h>
27 #include <linux/gfp.h>
28 #include <linux/module.h>
29 #include <linux/vmalloc.h>
30 #include <linux/fs.h>
31
32 #include <asm/cputable.h>
33 #include <asm/uaccess.h>
34 #include <asm/kvm_ppc.h>
35 #include <asm/cacheflush.h>
36 #include <asm/dbell.h>
37 #include <asm/hw_irq.h>
38 #include <asm/irq.h>
39
40 #include "timing.h"
41 #include "booke.h"
42 #include "trace.h"
43
44 unsigned long kvmppc_booke_handlers;
45
46 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
47 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
48
49 struct kvm_stats_debugfs_item debugfs_entries[] = {
50         { "mmio",       VCPU_STAT(mmio_exits) },
51         { "dcr",        VCPU_STAT(dcr_exits) },
52         { "sig",        VCPU_STAT(signal_exits) },
53         { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
54         { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
55         { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
56         { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
57         { "sysc",       VCPU_STAT(syscall_exits) },
58         { "isi",        VCPU_STAT(isi_exits) },
59         { "dsi",        VCPU_STAT(dsi_exits) },
60         { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
61         { "dec",        VCPU_STAT(dec_exits) },
62         { "ext_intr",   VCPU_STAT(ext_intr_exits) },
63         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
64         { "doorbell", VCPU_STAT(dbell_exits) },
65         { "guest doorbell", VCPU_STAT(gdbell_exits) },
66         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
67         { NULL }
68 };
69
70 /* TODO: use vcpu_printf() */
71 void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
72 {
73         int i;
74
75         printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
76         printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
77         printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
78                                             vcpu->arch.shared->srr1);
79
80         printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
81
82         for (i = 0; i < 32; i += 4) {
83                 printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
84                        kvmppc_get_gpr(vcpu, i),
85                        kvmppc_get_gpr(vcpu, i+1),
86                        kvmppc_get_gpr(vcpu, i+2),
87                        kvmppc_get_gpr(vcpu, i+3));
88         }
89 }
90
91 #ifdef CONFIG_SPE
92 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
93 {
94         preempt_disable();
95         enable_kernel_spe();
96         kvmppc_save_guest_spe(vcpu);
97         vcpu->arch.shadow_msr &= ~MSR_SPE;
98         preempt_enable();
99 }
100
101 static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
102 {
103         preempt_disable();
104         enable_kernel_spe();
105         kvmppc_load_guest_spe(vcpu);
106         vcpu->arch.shadow_msr |= MSR_SPE;
107         preempt_enable();
108 }
109
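/*
 * Keep the shadow MSR[SPE] bit (what the hardware sees) in sync with the
 * guest's MSR[SPE], loading or saving the guest SPE state as the bit is
 * toggled.
 */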
110 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
111 {
112         if (vcpu->arch.shared->msr & MSR_SPE) {
113                 if (!(vcpu->arch.shadow_msr & MSR_SPE))
114                         kvmppc_vcpu_enable_spe(vcpu);
115         } else if (vcpu->arch.shadow_msr & MSR_SPE) {
116                 kvmppc_vcpu_disable_spe(vcpu);
117         }
118 }
119 #else
120 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
121 {
122 }
123 #endif
124
125 /*
126  * Helper function for "full" MSR writes.  No need to call this if only
127  * EE/CE/ME/DE/RI are changing.
128  */
129 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
130 {
131         u32 old_msr = vcpu->arch.shared->msr;
132
133 #ifdef CONFIG_KVM_BOOKE_HV
134         new_msr |= MSR_GS;
135 #endif
136
137         vcpu->arch.shared->msr = new_msr;
138
139         kvmppc_mmu_msr_notify(vcpu, old_msr);
140         kvmppc_vcpu_sync_spe(vcpu);
141 }
142
143 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
144                                        unsigned int priority)
145 {
146         trace_kvm_booke_queue_irqprio(vcpu, priority);
147         set_bit(priority, &vcpu->arch.pending_exceptions);
148 }
149
150 static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
151                                         ulong dear_flags, ulong esr_flags)
152 {
153         vcpu->arch.queued_dear = dear_flags;
154         vcpu->arch.queued_esr = esr_flags;
155         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
156 }
157
158 static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
159                                            ulong dear_flags, ulong esr_flags)
160 {
161         vcpu->arch.queued_dear = dear_flags;
162         vcpu->arch.queued_esr = esr_flags;
163         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
164 }
165
166 static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
167                                            ulong esr_flags)
168 {
169         vcpu->arch.queued_esr = esr_flags;
170         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
171 }
172
173 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
174 {
175         vcpu->arch.queued_esr = esr_flags;
176         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
177 }
178
179 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
180 {
181         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
182 }
183
184 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
185 {
186         return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
187 }
188
189 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
190 {
191         clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
192 }
193
194 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
195                                 struct kvm_interrupt *irq)
196 {
197         unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;
198
199         if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
200                 prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;
201
202         kvmppc_booke_queue_irqprio(vcpu, prio);
203 }
204
205 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
206                                   struct kvm_interrupt *irq)
207 {
208         clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
209         clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
210 }
211
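/*
 * Helpers for the registers used to report interrupt state to the guest.
 * With CONFIG_KVM_BOOKE_HV, SRR0/1, DEAR and ESR map onto the hardware
 * guest SPRs (GSRR0/1, GDEAR, GESR); otherwise they are kept in the
 * shared area.  CSRR, DSRR and MCSRR are always tracked in vcpu->arch.
 */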
212 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
213 {
214 #ifdef CONFIG_KVM_BOOKE_HV
215         mtspr(SPRN_GSRR0, srr0);
216         mtspr(SPRN_GSRR1, srr1);
217 #else
218         vcpu->arch.shared->srr0 = srr0;
219         vcpu->arch.shared->srr1 = srr1;
220 #endif
221 }
222
223 static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
224 {
225         vcpu->arch.csrr0 = srr0;
226         vcpu->arch.csrr1 = srr1;
227 }
228
229 static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
230 {
231         if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
232                 vcpu->arch.dsrr0 = srr0;
233                 vcpu->arch.dsrr1 = srr1;
234         } else {
235                 set_guest_csrr(vcpu, srr0, srr1);
236         }
237 }
238
239 static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
240 {
241         vcpu->arch.mcsrr0 = srr0;
242         vcpu->arch.mcsrr1 = srr1;
243 }
244
245 static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
246 {
247 #ifdef CONFIG_KVM_BOOKE_HV
248         return mfspr(SPRN_GDEAR);
249 #else
250         return vcpu->arch.shared->dar;
251 #endif
252 }
253
254 static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
255 {
256 #ifdef CONFIG_KVM_BOOKE_HV
257         mtspr(SPRN_GDEAR, dear);
258 #else
259         vcpu->arch.shared->dar = dear;
260 #endif
261 }
262
263 static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
264 {
265 #ifdef CONFIG_KVM_BOOKE_HV
266         return mfspr(SPRN_GESR);
267 #else
268         return vcpu->arch.shared->esr;
269 #endif
270 }
271
272 static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
273 {
274 #ifdef CONFIG_KVM_BOOKE_HV
275         mtspr(SPRN_GESR, esr);
276 #else
277         vcpu->arch.shared->esr = esr;
278 #endif
279 }
280
281 /* Deliver the interrupt of the corresponding priority, if possible. */
282 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
283                                         unsigned int priority)
284 {
285         int allowed = 0;
286         ulong msr_mask = 0;
287         bool update_esr = false, update_dear = false;
288         ulong crit_raw = vcpu->arch.shared->critical;
289         ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
290         bool crit;
291         bool keep_irq = false;
292         enum int_class int_class;
293
294         /* Truncate crit indicators in 32 bit mode */
295         if (!(vcpu->arch.shared->msr & MSR_SF)) {
296                 crit_raw &= 0xffffffff;
297                 crit_r1 &= 0xffffffff;
298         }
299
300         /* Critical section when crit == r1 */
301         crit = (crit_raw == crit_r1);
302         /* ... and we're in supervisor mode */
303         crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
304
305         if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
306                 priority = BOOKE_IRQPRIO_EXTERNAL;
307                 keep_irq = true;
308         }
309
310         switch (priority) {
311         case BOOKE_IRQPRIO_DTLB_MISS:
312         case BOOKE_IRQPRIO_DATA_STORAGE:
313                 update_dear = true;
314                 /* fall through */
315         case BOOKE_IRQPRIO_INST_STORAGE:
316         case BOOKE_IRQPRIO_PROGRAM:
317                 update_esr = true;
318                 /* fall through */
319         case BOOKE_IRQPRIO_ITLB_MISS:
320         case BOOKE_IRQPRIO_SYSCALL:
321         case BOOKE_IRQPRIO_FP_UNAVAIL:
322         case BOOKE_IRQPRIO_SPE_UNAVAIL:
323         case BOOKE_IRQPRIO_SPE_FP_DATA:
324         case BOOKE_IRQPRIO_SPE_FP_ROUND:
325         case BOOKE_IRQPRIO_AP_UNAVAIL:
326         case BOOKE_IRQPRIO_ALIGNMENT:
327                 allowed = 1;
328                 msr_mask = MSR_CE | MSR_ME | MSR_DE;
329                 int_class = INT_CLASS_NONCRIT;
330                 break;
331         case BOOKE_IRQPRIO_CRITICAL:
332         case BOOKE_IRQPRIO_DBELL_CRIT:
333                 allowed = vcpu->arch.shared->msr & MSR_CE;
334                 allowed = allowed && !crit;
335                 msr_mask = MSR_ME;
336                 int_class = INT_CLASS_CRIT;
337                 break;
338         case BOOKE_IRQPRIO_MACHINE_CHECK:
339                 allowed = vcpu->arch.shared->msr & MSR_ME;
340                 allowed = allowed && !crit;
341                 int_class = INT_CLASS_MC;
342                 break;
343         case BOOKE_IRQPRIO_DECREMENTER:
344         case BOOKE_IRQPRIO_FIT:
345                 keep_irq = true;
346                 /* fall through */
347         case BOOKE_IRQPRIO_EXTERNAL:
348         case BOOKE_IRQPRIO_DBELL:
349                 allowed = vcpu->arch.shared->msr & MSR_EE;
350                 allowed = allowed && !crit;
351                 msr_mask = MSR_CE | MSR_ME | MSR_DE;
352                 int_class = INT_CLASS_NONCRIT;
353                 break;
354         case BOOKE_IRQPRIO_DEBUG:
355                 allowed = vcpu->arch.shared->msr & MSR_DE;
356                 allowed = allowed && !crit;
357                 msr_mask = MSR_ME;
358                 int_class = INT_CLASS_CRIT;
359                 break;
360         }
361
362         if (allowed) {
363                 switch (int_class) {
364                 case INT_CLASS_NONCRIT:
365                         set_guest_srr(vcpu, vcpu->arch.pc,
366                                       vcpu->arch.shared->msr);
367                         break;
368                 case INT_CLASS_CRIT:
369                         set_guest_csrr(vcpu, vcpu->arch.pc,
370                                        vcpu->arch.shared->msr);
371                         break;
372                 case INT_CLASS_DBG:
373                         set_guest_dsrr(vcpu, vcpu->arch.pc,
374                                        vcpu->arch.shared->msr);
375                         break;
376                 case INT_CLASS_MC:
377                         set_guest_mcsrr(vcpu, vcpu->arch.pc,
378                                         vcpu->arch.shared->msr);
379                         break;
380                 }
381
382                 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
383                 if (update_esr)
384                         set_guest_esr(vcpu, vcpu->arch.queued_esr);
385                 if (update_dear)
386                         set_guest_dear(vcpu, vcpu->arch.queued_dear);
387                 kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
388
389                 if (!keep_irq)
390                         clear_bit(priority, &vcpu->arch.pending_exceptions);
391         }
392
393 #ifdef CONFIG_KVM_BOOKE_HV
394         /*
395          * If an interrupt is pending but masked, raise a guest doorbell
396          * so that we are notified when the guest enables the relevant
397          * MSR bit.
398          */
399         if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
400                 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
401         if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
402                 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
403         if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
404                 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
405 #endif
406
407         return allowed;
408 }
409
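/*
 * Raise or clear the guest decrementer interrupt depending on whether the
 * decrementer is enabled (TCR[DIE]) and has expired (TSR[DIS]).
 */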
410 static void update_timer_ints(struct kvm_vcpu *vcpu)
411 {
412         if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
413                 kvmppc_core_queue_dec(vcpu);
414         else
415                 kvmppc_core_dequeue_dec(vcpu);
416 }
417
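/*
 * Walk the pending exception bitmap in irqprio order and deliver the first
 * exception the guest can currently take, then record in the shared area
 * whether anything is still pending.
 */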
418 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
419 {
420         unsigned long *pending = &vcpu->arch.pending_exceptions;
421         unsigned int priority;
422
423         priority = __ffs(*pending);
424         while (priority < BOOKE_IRQPRIO_MAX) {
425                 if (kvmppc_booke_irqprio_deliver(vcpu, priority))
426                         break;
427
428                 priority = find_next_bit(pending,
429                                          BITS_PER_BYTE * sizeof(*pending),
430                                          priority + 1);
431         }
432
433         /* Tell the guest about our interrupt status */
434         vcpu->arch.shared->int_pending = !!*pending;
435 }
436
437 /* Check pending exceptions and deliver one, if possible. */
438 int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
439 {
440         int r = 0;
441         WARN_ON_ONCE(!irqs_disabled());
442
443         kvmppc_core_check_exceptions(vcpu);
444
445         if (vcpu->arch.shared->msr & MSR_WE) {
446                 local_irq_enable();
447                 kvm_vcpu_block(vcpu);
448                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
449                 local_irq_disable();
450
451                 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
452                 r = 1;
453         }
454
455         return r;
456 }
457
458 void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
459 {
460         if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
461                 update_timer_ints(vcpu);
462 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
463         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
464                 kvmppc_core_flush_tlb(vcpu);
465 #endif
466 }
467
468 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
469 {
470         int ret;
471 #ifdef CONFIG_PPC_FPU
472         unsigned int fpscr;
473         int fpexc_mode;
474         u64 fpr[32];
475 #endif
476
477         if (!vcpu->arch.sane) {
478                 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
479                 return -EINVAL;
480         }
481
482         local_irq_disable();
483         if (kvmppc_prepare_to_enter(vcpu)) {
484                 local_irq_enable();
485                 kvm_run->exit_reason = KVM_EXIT_INTR;
486                 ret = -EINTR;
487                 goto out;
488         }
489         kvmppc_lazy_ee_enable();
490
491         kvm_guest_enter();
492
493 #ifdef CONFIG_PPC_FPU
494         /* Save userspace FPU state in stack */
495         enable_kernel_fp();
496         memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
497         fpscr = current->thread.fpscr.val;
498         fpexc_mode = current->thread.fpexc_mode;
499
500         /* Restore guest FPU state to thread */
501         memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
502         current->thread.fpscr.val = vcpu->arch.fpscr;
503
504         /*
505          * Since we can't trap on MSR_FP in GS-mode, we consider the guest
506          * as always using the FPU.  Kernel usage of FP (via
507          * enable_kernel_fp()) in this thread must not occur while
508          * vcpu->fpu_active is set.
509          */
510         vcpu->fpu_active = 1;
511
512         kvmppc_load_guest_fp(vcpu);
513 #endif
514
515         ret = __kvmppc_vcpu_run(kvm_run, vcpu);
516
517         /* No need for kvm_guest_exit. It's done in handle_exit.
518            We also get here with interrupts enabled. */
519
520 #ifdef CONFIG_PPC_FPU
521         kvmppc_save_guest_fp(vcpu);
522
523         vcpu->fpu_active = 0;
524
525         /* Save guest FPU state from thread */
526         memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
527         vcpu->arch.fpscr = current->thread.fpscr.val;
528
529         /* Restore userspace FPU state from stack */
530         memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
531         current->thread.fpscr.val = fpscr;
532         current->thread.fpexc_mode = fpexc_mode;
533 #endif
534
535 out:
536         vcpu->mode = OUTSIDE_GUEST_MODE;
537         smp_wmb();
538         return ret;
539 }
540
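/*
 * Run the instruction emulator for an exit that requires emulation and
 * translate the emulation result into a resume code for the run loop.
 */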
541 static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
542 {
543         enum emulation_result er;
544
545         er = kvmppc_emulate_instruction(run, vcpu);
546         switch (er) {
547         case EMULATE_DONE:
548                 /* don't overwrite subtypes, just account kvm_stats */
549                 kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
550                 /* Future optimization: only reload non-volatiles if
551                  * they were actually modified by emulation. */
552                 return RESUME_GUEST_NV;
553
554         case EMULATE_DO_DCR:
555                 run->exit_reason = KVM_EXIT_DCR;
556                 return RESUME_HOST;
557
558         case EMULATE_FAIL:
559                 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
560                        __func__, vcpu->arch.pc, vcpu->arch.last_inst);
561                 /* For debugging, encode the failing instruction and
562                  * report it to userspace. */
563                 run->hw.hardware_exit_reason = ~0ULL << 32;
564                 run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
565                 kvmppc_core_queue_program(vcpu, ESR_PIL);
566                 return RESUME_HOST;
567
568         default:
569                 BUG();
570         }
571 }
572
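/*
 * Build a minimal pt_regs from the current host context (stack pointer,
 * NIP, MSR and LR) so that host exception handlers can be invoked as if
 * a real interrupt had been taken.
 */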
573 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
574 {
575         ulong r1, ip, msr, lr;
576
577         asm("mr %0, 1" : "=r"(r1));
578         asm("mflr %0" : "=r"(lr));
579         asm("mfmsr %0" : "=r"(msr));
580         asm("bl 1f; 1: mflr %0" : "=r"(ip));
581
582         memset(regs, 0, sizeof(*regs));
583         regs->gpr[1] = r1;
584         regs->nip = ip;
585         regs->msr = msr;
586         regs->link = lr;
587 }
588
589 /*
590  * For interrupts that need to be handled by host interrupt handlers,
591  * the corresponding host handler is called from here in a similar
592  * (though not identical) way to how it would be called from the low
593  * level handlers (such as arch/powerpc/kernel/head_fsl_booke.S).
594  */
595 static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
596                                      unsigned int exit_nr)
597 {
598         struct pt_regs regs;
599
600         switch (exit_nr) {
601         case BOOKE_INTERRUPT_EXTERNAL:
602                 kvmppc_fill_pt_regs(&regs);
603                 do_IRQ(&regs);
604                 break;
605         case BOOKE_INTERRUPT_DECREMENTER:
606                 kvmppc_fill_pt_regs(&regs);
607                 timer_interrupt(&regs);
608                 break;
609 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
610         case BOOKE_INTERRUPT_DOORBELL:
611                 kvmppc_fill_pt_regs(&regs);
612                 doorbell_exception(&regs);
613                 break;
614 #endif
615         case BOOKE_INTERRUPT_MACHINE_CHECK:
616                 /* FIXME */
617                 break;
618         case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
619                 kvmppc_fill_pt_regs(&regs);
620                 performance_monitor_exception(&regs);
621                 break;
622         case BOOKE_INTERRUPT_WATCHDOG:
623                 kvmppc_fill_pt_regs(&regs);
624 #ifdef CONFIG_BOOKE_WDT
625                 WatchdogException(&regs);
626 #else
627                 unknown_exception(&regs);
628 #endif
629                 break;
630         case BOOKE_INTERRUPT_CRITICAL:
631                 unknown_exception(&regs);
632                 break;
633         }
634 }
635
636 /**
637  * kvmppc_handle_exit
638  *
639  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
640  */
641 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
642                        unsigned int exit_nr)
643 {
644         int r = RESUME_HOST;
645
646         /* update before a new last_exit_type is rewritten */
647         kvmppc_update_timing_stats(vcpu);
648
649         /* restart interrupts if they were meant for the host */
650         kvmppc_restart_interrupt(vcpu, exit_nr);
651
652         local_irq_enable();
653
654         trace_kvm_exit(exit_nr, vcpu);
655         kvm_guest_exit();
656
657         run->exit_reason = KVM_EXIT_UNKNOWN;
658         run->ready_for_interrupt_injection = 1;
659
660         switch (exit_nr) {
661         case BOOKE_INTERRUPT_MACHINE_CHECK:
662                 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
663                 kvmppc_dump_vcpu(vcpu);
664                 /* For debugging, send invalid exit reason to user space */
665                 run->hw.hardware_exit_reason = ~1ULL << 32;
666                 run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
667                 r = RESUME_HOST;
668                 break;
669
670         case BOOKE_INTERRUPT_EXTERNAL:
671                 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
672                 r = RESUME_GUEST;
673                 break;
674
675         case BOOKE_INTERRUPT_DECREMENTER:
676                 kvmppc_account_exit(vcpu, DEC_EXITS);
677                 r = RESUME_GUEST;
678                 break;
679
680         case BOOKE_INTERRUPT_WATCHDOG:
681                 r = RESUME_GUEST;
682                 break;
683
684         case BOOKE_INTERRUPT_DOORBELL:
685                 kvmppc_account_exit(vcpu, DBELL_EXITS);
686                 r = RESUME_GUEST;
687                 break;
688
689         case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
690                 kvmppc_account_exit(vcpu, GDBELL_EXITS);
691
692                 /*
693                  * We are here because there is a pending guest interrupt
694                  * which could not be delivered as MSR_CE or MSR_ME was not
695                  * set.  Once we break from here we will retry delivery.
696                  */
697                 r = RESUME_GUEST;
698                 break;
699
700         case BOOKE_INTERRUPT_GUEST_DBELL:
701                 kvmppc_account_exit(vcpu, GDBELL_EXITS);
702
703                 /*
704                  * We are here because there is a pending guest interrupt
705                  * which could not be delivered as MSR_EE was not set.  Once
706                  * we break from here we will retry delivery.
707                  */
708                 r = RESUME_GUEST;
709                 break;
710
711         case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
712                 r = RESUME_GUEST;
713                 break;
714
715         case BOOKE_INTERRUPT_HV_PRIV:
716                 r = emulation_exit(run, vcpu);
717                 break;
718
719         case BOOKE_INTERRUPT_PROGRAM:
720                 if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
721                         /*
722                          * Program traps generated by user-level software must
723                          * be handled by the guest kernel.
724                          *
725                          * In GS mode, hypervisor privileged instructions trap
726                          * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
727                          * actual program interrupts, handled by the guest.
728                          */
729                         kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
730                         r = RESUME_GUEST;
731                         kvmppc_account_exit(vcpu, USR_PR_INST);
732                         break;
733                 }
734
735                 r = emulation_exit(run, vcpu);
736                 break;
737
738         case BOOKE_INTERRUPT_FP_UNAVAIL:
739                 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
740                 kvmppc_account_exit(vcpu, FP_UNAVAIL);
741                 r = RESUME_GUEST;
742                 break;
743
744 #ifdef CONFIG_SPE
745         case BOOKE_INTERRUPT_SPE_UNAVAIL: {
746                 if (vcpu->arch.shared->msr & MSR_SPE)
747                         kvmppc_vcpu_enable_spe(vcpu);
748                 else
749                         kvmppc_booke_queue_irqprio(vcpu,
750                                                    BOOKE_IRQPRIO_SPE_UNAVAIL);
751                 r = RESUME_GUEST;
752                 break;
753         }
754
755         case BOOKE_INTERRUPT_SPE_FP_DATA:
756                 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
757                 r = RESUME_GUEST;
758                 break;
759
760         case BOOKE_INTERRUPT_SPE_FP_ROUND:
761                 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
762                 r = RESUME_GUEST;
763                 break;
764 #else
765         case BOOKE_INTERRUPT_SPE_UNAVAIL:
766                 /*
767                  * Guest wants SPE, but host kernel doesn't support it.  Send
768                  * an "unimplemented operation" program check to the guest.
769                  */
770                 kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
771                 r = RESUME_GUEST;
772                 break;
773
774         /*
775          * These really should never happen without CONFIG_SPE,
776          * as we should never enable the real MSR[SPE] in the guest.
777          */
778         case BOOKE_INTERRUPT_SPE_FP_DATA:
779         case BOOKE_INTERRUPT_SPE_FP_ROUND:
780                 printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
781                        __func__, exit_nr, vcpu->arch.pc);
782                 run->hw.hardware_exit_reason = exit_nr;
783                 r = RESUME_HOST;
784                 break;
785 #endif
786
787         case BOOKE_INTERRUPT_DATA_STORAGE:
788                 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
789                                                vcpu->arch.fault_esr);
790                 kvmppc_account_exit(vcpu, DSI_EXITS);
791                 r = RESUME_GUEST;
792                 break;
793
794         case BOOKE_INTERRUPT_INST_STORAGE:
795                 kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
796                 kvmppc_account_exit(vcpu, ISI_EXITS);
797                 r = RESUME_GUEST;
798                 break;
799
800 #ifdef CONFIG_KVM_BOOKE_HV
801         case BOOKE_INTERRUPT_HV_SYSCALL:
802                 if (!(vcpu->arch.shared->msr & MSR_PR)) {
803                         kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
804                 } else {
805                         /*
806                          * hcall from guest userspace -- send privileged
807                          * instruction program check.
808                          */
809                         kvmppc_core_queue_program(vcpu, ESR_PPR);
810                 }
811
812                 r = RESUME_GUEST;
813                 break;
814 #else
815         case BOOKE_INTERRUPT_SYSCALL:
816                 if (!(vcpu->arch.shared->msr & MSR_PR) &&
817                     (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
818                         /* KVM PV hypercalls */
819                         kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
820                         r = RESUME_GUEST;
821                 } else {
822                         /* Guest syscalls */
823                         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
824                 }
825                 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
826                 r = RESUME_GUEST;
827                 break;
828 #endif
829
830         case BOOKE_INTERRUPT_DTLB_MISS: {
831                 unsigned long eaddr = vcpu->arch.fault_dear;
832                 int gtlb_index;
833                 gpa_t gpaddr;
834                 gfn_t gfn;
835
836 #ifdef CONFIG_KVM_E500V2
837                 if (!(vcpu->arch.shared->msr & MSR_PR) &&
838                     (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
839                         kvmppc_map_magic(vcpu);
840                         kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
841                         r = RESUME_GUEST;
842
843                         break;
844                 }
845 #endif
846
847                 /* Check the guest TLB. */
848                 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
849                 if (gtlb_index < 0) {
850                         /* The guest didn't have a mapping for it. */
851                         kvmppc_core_queue_dtlb_miss(vcpu,
852                                                     vcpu->arch.fault_dear,
853                                                     vcpu->arch.fault_esr);
854                         kvmppc_mmu_dtlb_miss(vcpu);
855                         kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
856                         r = RESUME_GUEST;
857                         break;
858                 }
859
860                 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
861                 gfn = gpaddr >> PAGE_SHIFT;
862
863                 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
864                         /* The guest TLB had a mapping, but the shadow TLB
865                          * didn't, and it is RAM. This could be because:
866                          * a) the entry is mapping the host kernel, or
867                          * b) the guest used a large mapping which we're faking
868                          * Either way, we need to satisfy the fault without
869                          * invoking the guest. */
870                         kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
871                         kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
872                         r = RESUME_GUEST;
873                 } else {
874                         /* Guest has mapped and accessed a page which is not
875                          * actually RAM. */
876                         vcpu->arch.paddr_accessed = gpaddr;
877                         vcpu->arch.vaddr_accessed = eaddr;
878                         r = kvmppc_emulate_mmio(run, vcpu);
879                         kvmppc_account_exit(vcpu, MMIO_EXITS);
880                 }
881
882                 break;
883         }
884
885         case BOOKE_INTERRUPT_ITLB_MISS: {
886                 unsigned long eaddr = vcpu->arch.pc;
887                 gpa_t gpaddr;
888                 gfn_t gfn;
889                 int gtlb_index;
890
891                 r = RESUME_GUEST;
892
893                 /* Check the guest TLB. */
894                 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
895                 if (gtlb_index < 0) {
896                         /* The guest didn't have a mapping for it. */
897                         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
898                         kvmppc_mmu_itlb_miss(vcpu);
899                         kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
900                         break;
901                 }
902
903                 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
904
905                 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
906                 gfn = gpaddr >> PAGE_SHIFT;
907
908                 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
909                         /* The guest TLB had a mapping, but the shadow TLB
910                          * didn't. This could be because:
911                          * a) the entry is mapping the host kernel, or
912                          * b) the guest used a large mapping which we're faking
913                          * Either way, we need to satisfy the fault without
914                          * invoking the guest. */
915                         kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
916                 } else {
917                         /* Guest mapped and leaped at non-RAM! */
918                         kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
919                 }
920
921                 break;
922         }
923
924         case BOOKE_INTERRUPT_DEBUG: {
925                 u32 dbsr;
926
927                 vcpu->arch.pc = mfspr(SPRN_CSRR0);
928
929                 /* clear IAC events in DBSR register */
930                 dbsr = mfspr(SPRN_DBSR);
931                 dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
932                 mtspr(SPRN_DBSR, dbsr);
933
934                 run->exit_reason = KVM_EXIT_DEBUG;
935                 kvmppc_account_exit(vcpu, DEBUG_EXITS);
936                 r = RESUME_HOST;
937                 break;
938         }
939
940         default:
941                 printk(KERN_EMERG "exit_nr %d\n", exit_nr);
942                 BUG();
943         }
944
945         /*
946          * To avoid clobbering exit_reason, only check for signals if we
947          * aren't already exiting to userspace for some other reason.
948          */
949         if (!(r & RESUME_HOST)) {
950                 local_irq_disable();
951                 if (kvmppc_prepare_to_enter(vcpu)) {
952                         local_irq_enable();
953                         run->exit_reason = KVM_EXIT_INTR;
954                         r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
955                         kvmppc_account_exit(vcpu, SIGNAL_EXITS);
956                 } else {
957                         kvmppc_lazy_ee_enable();
958                 }
959         }
960
961         return r;
962 }
963
964 /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
965 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
966 {
967         int i;
968         int r;
969
970         vcpu->arch.pc = 0;
971         vcpu->arch.shared->pir = vcpu->vcpu_id;
972         kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
973         kvmppc_set_msr(vcpu, 0);
974
975 #ifndef CONFIG_KVM_BOOKE_HV
976         vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
977         vcpu->arch.shadow_pid = 1;
978         vcpu->arch.shared->msr = 0;
979 #endif
980
981         /* Eye-catching numbers so we know if the guest takes an interrupt
982          * before it's programmed its own IVPR/IVORs. */
983         vcpu->arch.ivpr = 0x55550000;
984         for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
985                 vcpu->arch.ivor[i] = 0x7700 | i * 4;
986
987         kvmppc_init_timing_stats(vcpu);
988
989         r = kvmppc_core_vcpu_setup(vcpu);
990         kvmppc_sanity_check(vcpu);
991         return r;
992 }
993
994 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
995 {
996         int i;
997
998         regs->pc = vcpu->arch.pc;
999         regs->cr = kvmppc_get_cr(vcpu);
1000         regs->ctr = vcpu->arch.ctr;
1001         regs->lr = vcpu->arch.lr;
1002         regs->xer = kvmppc_get_xer(vcpu);
1003         regs->msr = vcpu->arch.shared->msr;
1004         regs->srr0 = vcpu->arch.shared->srr0;
1005         regs->srr1 = vcpu->arch.shared->srr1;
1006         regs->pid = vcpu->arch.pid;
1007         regs->sprg0 = vcpu->arch.shared->sprg0;
1008         regs->sprg1 = vcpu->arch.shared->sprg1;
1009         regs->sprg2 = vcpu->arch.shared->sprg2;
1010         regs->sprg3 = vcpu->arch.shared->sprg3;
1011         regs->sprg4 = vcpu->arch.shared->sprg4;
1012         regs->sprg5 = vcpu->arch.shared->sprg5;
1013         regs->sprg6 = vcpu->arch.shared->sprg6;
1014         regs->sprg7 = vcpu->arch.shared->sprg7;
1015
1016         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1017                 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
1018
1019         return 0;
1020 }
1021
1022 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1023 {
1024         int i;
1025
1026         vcpu->arch.pc = regs->pc;
1027         kvmppc_set_cr(vcpu, regs->cr);
1028         vcpu->arch.ctr = regs->ctr;
1029         vcpu->arch.lr = regs->lr;
1030         kvmppc_set_xer(vcpu, regs->xer);
1031         kvmppc_set_msr(vcpu, regs->msr);
1032         vcpu->arch.shared->srr0 = regs->srr0;
1033         vcpu->arch.shared->srr1 = regs->srr1;
1034         kvmppc_set_pid(vcpu, regs->pid);
1035         vcpu->arch.shared->sprg0 = regs->sprg0;
1036         vcpu->arch.shared->sprg1 = regs->sprg1;
1037         vcpu->arch.shared->sprg2 = regs->sprg2;
1038         vcpu->arch.shared->sprg3 = regs->sprg3;
1039         vcpu->arch.shared->sprg4 = regs->sprg4;
1040         vcpu->arch.shared->sprg5 = regs->sprg5;
1041         vcpu->arch.shared->sprg6 = regs->sprg6;
1042         vcpu->arch.shared->sprg7 = regs->sprg7;
1043
1044         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1045                 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
1046
1047         return 0;
1048 }
1049
1050 static void get_sregs_base(struct kvm_vcpu *vcpu,
1051                            struct kvm_sregs *sregs)
1052 {
1053         u64 tb = get_tb();
1054
1055         sregs->u.e.features |= KVM_SREGS_E_BASE;
1056
1057         sregs->u.e.csrr0 = vcpu->arch.csrr0;
1058         sregs->u.e.csrr1 = vcpu->arch.csrr1;
1059         sregs->u.e.mcsr = vcpu->arch.mcsr;
1060         sregs->u.e.esr = get_guest_esr(vcpu);
1061         sregs->u.e.dear = get_guest_dear(vcpu);
1062         sregs->u.e.tsr = vcpu->arch.tsr;
1063         sregs->u.e.tcr = vcpu->arch.tcr;
1064         sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1065         sregs->u.e.tb = tb;
1066         sregs->u.e.vrsave = vcpu->arch.vrsave;
1067 }
1068
1069 static int set_sregs_base(struct kvm_vcpu *vcpu,
1070                           struct kvm_sregs *sregs)
1071 {
1072         if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1073                 return 0;
1074
1075         vcpu->arch.csrr0 = sregs->u.e.csrr0;
1076         vcpu->arch.csrr1 = sregs->u.e.csrr1;
1077         vcpu->arch.mcsr = sregs->u.e.mcsr;
1078         set_guest_esr(vcpu, sregs->u.e.esr);
1079         set_guest_dear(vcpu, sregs->u.e.dear);
1080         vcpu->arch.vrsave = sregs->u.e.vrsave;
1081         kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
1082
1083         if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
1084                 vcpu->arch.dec = sregs->u.e.dec;
1085                 kvmppc_emulate_dec(vcpu);
1086         }
1087
1088         if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
1089                 vcpu->arch.tsr = sregs->u.e.tsr;
1090                 update_timer_ints(vcpu);
1091         }
1092
1093         return 0;
1094 }
1095
1096 static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1097                               struct kvm_sregs *sregs)
1098 {
1099         sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1100
1101         sregs->u.e.pir = vcpu->vcpu_id;
1102         sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1103         sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1104         sregs->u.e.decar = vcpu->arch.decar;
1105         sregs->u.e.ivpr = vcpu->arch.ivpr;
1106 }
1107
1108 static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1109                              struct kvm_sregs *sregs)
1110 {
1111         if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1112                 return 0;
1113
1114         if (sregs->u.e.pir != vcpu->vcpu_id)
1115                 return -EINVAL;
1116
1117         vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1118         vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1119         vcpu->arch.decar = sregs->u.e.decar;
1120         vcpu->arch.ivpr = sregs->u.e.ivpr;
1121
1122         return 0;
1123 }
1124
1125 void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1126 {
1127         sregs->u.e.features |= KVM_SREGS_E_IVOR;
1128
1129         sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1130         sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1131         sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1132         sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1133         sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1134         sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1135         sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1136         sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1137         sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1138         sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1139         sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1140         sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1141         sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1142         sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1143         sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1144         sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
1145 }
1146
1147 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1148 {
1149         if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
1150                 return 0;
1151
1152         vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1153         vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1154         vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1155         vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1156         vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1157         vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1158         vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1159         vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1160         vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1161         vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1162         vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1163         vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1164         vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1165         vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1166         vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1167         vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
1168
1169         return 0;
1170 }
1171
1172 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1173                                   struct kvm_sregs *sregs)
1174 {
1175         sregs->pvr = vcpu->arch.pvr;
1176
1177         get_sregs_base(vcpu, sregs);
1178         get_sregs_arch206(vcpu, sregs);
1179         kvmppc_core_get_sregs(vcpu, sregs);
1180         return 0;
1181 }
1182
1183 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1184                                   struct kvm_sregs *sregs)
1185 {
1186         int ret;
1187
1188         if (vcpu->arch.pvr != sregs->pvr)
1189                 return -EINVAL;
1190
1191         ret = set_sregs_base(vcpu, sregs);
1192         if (ret < 0)
1193                 return ret;
1194
1195         ret = set_sregs_arch206(vcpu, sregs);
1196         if (ret < 0)
1197                 return ret;
1198
1199         return kvmppc_core_set_sregs(vcpu, sregs);
1200 }
1201
1202 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1203 {
1204         return -EINVAL;
1205 }
1206
1207 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1208 {
1209         return -EINVAL;
1210 }
1211
1212 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1213 {
1214         return -ENOTSUPP;
1215 }
1216
1217 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1218 {
1219         return -ENOTSUPP;
1220 }
1221
1222 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1223                                   struct kvm_translation *tr)
1224 {
1225         int r;
1226
1227         r = kvmppc_core_vcpu_translate(vcpu, tr);
1228         return r;
1229 }
1230
1231 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1232 {
1233         return -ENOTSUPP;
1234 }
1235
1236 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1237                                       struct kvm_userspace_memory_region *mem)
1238 {
1239         return 0;
1240 }
1241
1242 void kvmppc_core_commit_memory_region(struct kvm *kvm,
1243                                 struct kvm_userspace_memory_region *mem)
1244 {
1245 }
1246
1247 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1248 {
1249         vcpu->arch.tcr = new_tcr;
1250         update_timer_ints(vcpu);
1251 }
1252
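/*
 * Set bits in the guest TSR and kick the vcpu so that any resulting
 * timer interrupt is noticed and delivered.
 */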
1253 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1254 {
1255         set_bits(tsr_bits, &vcpu->arch.tsr);
1256         smp_wmb();
1257         kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1258         kvm_vcpu_kick(vcpu);
1259 }
1260
1261 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1262 {
1263         clear_bits(tsr_bits, &vcpu->arch.tsr);
1264         update_timer_ints(vcpu);
1265 }
1266
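/*
 * Decrementer timer callback: if auto-reload (TCR[ARE]) is enabled, reload
 * the decrementer from DECAR, then flag the decrementer event in TSR so it
 * gets delivered to the guest.
 */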
1267 void kvmppc_decrementer_func(unsigned long data)
1268 {
1269         struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
1270
1271         if (vcpu->arch.tcr & TCR_ARE) {
1272                 vcpu->arch.dec = vcpu->arch.decar;
1273                 kvmppc_emulate_dec(vcpu);
1274         }
1275
1276         kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1277 }
1278
1279 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1280 {
1281         current->thread.kvm_vcpu = vcpu;
1282 }
1283
1284 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
1285 {
1286         current->thread.kvm_vcpu = NULL;
1287 }
1288
1289 int __init kvmppc_booke_init(void)
1290 {
1291 #ifndef CONFIG_KVM_BOOKE_HV
1292         unsigned long ivor[16];
1293         unsigned long max_ivor = 0;
1294         int i;
1295
1296         /* We install our own exception handlers by hijacking IVPR. The IVORs supply
1297          * the low 16 bits of each vector, so we need an aligned 64KB allocation. */
1298         kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
1299                                                  VCPU_SIZE_ORDER);
1300         if (!kvmppc_booke_handlers)
1301                 return -ENOMEM;
1302
1303         /* XXX make sure our handlers are smaller than Linux's */
1304
1305         /* Copy our interrupt handlers to match host IVORs. That way we don't
1306          * have to swap the IVORs on every guest/host transition. */
1307         ivor[0] = mfspr(SPRN_IVOR0);
1308         ivor[1] = mfspr(SPRN_IVOR1);
1309         ivor[2] = mfspr(SPRN_IVOR2);
1310         ivor[3] = mfspr(SPRN_IVOR3);
1311         ivor[4] = mfspr(SPRN_IVOR4);
1312         ivor[5] = mfspr(SPRN_IVOR5);
1313         ivor[6] = mfspr(SPRN_IVOR6);
1314         ivor[7] = mfspr(SPRN_IVOR7);
1315         ivor[8] = mfspr(SPRN_IVOR8);
1316         ivor[9] = mfspr(SPRN_IVOR9);
1317         ivor[10] = mfspr(SPRN_IVOR10);
1318         ivor[11] = mfspr(SPRN_IVOR11);
1319         ivor[12] = mfspr(SPRN_IVOR12);
1320         ivor[13] = mfspr(SPRN_IVOR13);
1321         ivor[14] = mfspr(SPRN_IVOR14);
1322         ivor[15] = mfspr(SPRN_IVOR15);
1323
1324         for (i = 0; i < 16; i++) {
1325                 if (ivor[i] > max_ivor)
1326                         max_ivor = ivor[i];
1327
1328                 memcpy((void *)kvmppc_booke_handlers + ivor[i],
1329                        kvmppc_handlers_start + i * kvmppc_handler_len,
1330                        kvmppc_handler_len);
1331         }
1332         flush_icache_range(kvmppc_booke_handlers,
1333                            kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
1334 #endif /* !BOOKE_HV */
1335         return 0;
1336 }
1337
1338 void __exit kvmppc_booke_exit(void)
1339 {
1340         free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
1341         kvm_exit();
1342 }