/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

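/*
 * vcpu load/put: on 64-bit hosts the shadow SLB that real-mode code uses
 * lives in a per-CPU shadow vcpu, so we copy the guest's SLB image in on
 * load and back out on put (see the matching memcpy pair below).
 */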
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif
	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr = vcpu->arch.lr;
	svcpu->pc = vcpu->arch.pc;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr = svcpu->lr;
	vcpu->arch.pc = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst = svcpu->last_inst;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the to be unmapped page
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

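/*
 * The guest runs in problem state, so the MSR the hardware actually sees
 * (the "shadow" MSR) differs from the MSR the guest believes it has set.
 * Below we keep the guest-visible FE0/FE1/SF/SE/BE bits, force on the
 * bits the host needs (ME, RI, IR, DR, PR, EE), and pass through any
 * facility bits (FP/VEC/VSX) the guest currently owns.
 */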
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);
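
	/*
	 * Guest set MSR_POW: emulate entering power-save by blocking the
	 * vcpu until an interrupt is pending, then clear POW on wakeup.
	 */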
	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically the 32-bit magic
	 * page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr	%0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have 32 byte dcbz cache line size, which Linux
 * assumes. To make Book3s_32 Linux work on Book3s_64, we have to make sure
 * we trap dcbz to emulate a 32 byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

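/*
 * A gfn is visible to the guest if it is backed by a memslot, or if it
 * is the per-vcpu magic page, which is checked explicitly below. When the
 * guest is in 32-bit mode only the low 32 bits of the magic page address
 * apply.
 */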
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}
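
	/*
	 * Tag the virtual page with a pseudo-VSID so that real-mode
	 * accesses (IR/DR off) get their own translation namespace and
	 * never collide with relocated guest mappings.
	 */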
	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shared->msr |=
			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

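/*
 * With VSX, thread.fpr[] holds each FP register in the first half of a
 * VSX doubleword pair, so consecutive FP registers sit TS_FPRWIDTH
 * doublewords apart; get_fpr_index() converts an FP register number into
 * an index into that layout (the "+ 1" accesses used below then reach the
 * second, VSX half of each pair).
 */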
static inline int get_fpr_index(int i)
{
	return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fpr[].
		 */
		if (current->thread.regs->msr & MSR_FP)
			giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;

#ifdef CONFIG_VSX
		if (cpu_has_feature(CPU_FTR_VSX))
			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

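/*
 * Re-read the faulting instruction from guest memory. If its page has
 * meanwhile been unmapped (-ENOENT), synthesize an instruction storage
 * interrupt for the guest instead; the kvmppc_set_field() updates below
 * adjust the SRR1 fault bits the way hardware would report a missing
 * translation (an assumption based on the bit ranges used).
 */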
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		kvmppc_load_up_altivec();
#endif
	}

	current->thread.regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP)
		kvmppc_load_up_fpu();
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC)
		kvmppc_load_up_altivec();
#endif
	current->thread.regs->msr |= lost_ext;
}

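/*
 * Top-level exit handler, called on every transition out of the guest.
 * We arrive here with interrupts enabled (MSR.EE=1); the switch below
 * either fixes the exit up and resumes the guest, or bounces the event
 * out to userspace via kvm_run.
 */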
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		case EMULATE_EXIT_USER:
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
		default:
			/* nothing to worry about - go again */
			r = RESUME_GUEST;
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = s;
		} else {
			kvmppc_fix_ee_before_entry();
		}
		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
						    sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
		long int i = id - KVM_REG_PPC_VSR0;

		if (!cpu_has_feature(CPU_FTR_VSX)) {
			r = -ENXIO;
			break;
		}
		val->vsxval[0] = vcpu->arch.fpr[i];
		val->vsxval[1] = vcpu->arch.vsr[i];
		break;
	}
#endif /* CONFIG_VSX */
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
		long int i = id - KVM_REG_PPC_VSR0;

		if (!cpu_has_feature(CPU_FTR_VSX)) {
			r = -ENXIO;
			break;
		}
		vcpu->arch.fpr[i] = val->vsxval[0];
		vcpu->arch.vsr[i] = val->vsxval[1];
		break;
	}
#endif /* CONFIG_VSX */
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_32
	vcpu_book3s->shadow_vcpu =
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;
#endif
	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Default to the same as the host if we're on a sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
#endif
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	local_irq_disable();
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0) {
		local_irq_enable();
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	current->thread.regs->msr = ext_msr;

	/* Restore FPU/VSX state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		i++;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#endif /* CONFIG_PPC64 */

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

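/*
 * PR KVM needs exceptions to be delivered in real mode, so while at least
 * one PR VM exists on a host with FW_FEATURE_SET_MODE we disable
 * "relocation on" exception delivery, and re-enable it when the last VM
 * goes away; kvm_global_user_count above tracks this across all VMs.
 */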
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pSeries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
	kvmppc_rtas_tokens_free(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pSeries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);