/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M _PAGE_COHERENT
#endif

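/*
 * Note on "split real mode": the guest can run with MSR_DR set while
 * MSR_IR is clear, i.e. instruction fetches use real addresses while
 * data accesses are translated. kvmppc_fixup_split_real() below offsets
 * the guest PC into a dedicated range (SPLIT_HACK_OFFS) so such code can
 * still run in user mode; kvmppc_unfixup_split_real() reverses the offset.
 */
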
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

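/*
 * vcpu_load/vcpu_put: sync the shadow SLB into/out of the per-CPU shadow
 * vcpu area and flip AIL. With AIL (Alternate Interrupt Location) active,
 * interrupts are delivered with relocation on at the AIL base, bypassing
 * the real-mode entry code that PR KVM relies on, so AIL is turned off
 * around guest execution on HV-capable ISA 2.07+ hosts.
 */
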
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use)
		kvmppc_copy_from_svcpu(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr  = vcpu->arch.lr;
	svcpu->pc  = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr  = svcpu->lr;
	vcpu->arch.pc  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/

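/*
 * Flush the shadow page tables for everything that might map a given
 * host virtual address range: walk every memslot, clip [start, end)
 * against it, convert the overlap back to guest frame numbers and ask
 * every vcpu to drop the matching shadow PTEs.
 */
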
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

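/*
 * The shadow MSR is the value the hardware actually runs with while the
 * guest executes: guest-visible mode bits (FE0/FE1/SF/SE/BE/LE) are kept,
 * translation and problem state are forced on, and FP/VEC/VSX bits are
 * only passed through while the guest owns the corresponding facility.
 */
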
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

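/*
 * A guest MSR write can change translation mode or request power-save
 * (MSR_POW): setting MSR_POW with no pending exceptions is treated as an
 * idle request and blocks the vcpu. Any change of PR/IR/DR forces a
 * segment flush and a remap of the segment holding the current PC.
 */
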
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically 32-bit magic
	 * page will be instantiated when calling into RTAS. Note: We
	 * assume that such transition only happens while in kernel mode,
	 * i.e., we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

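/*
 * Note: patched dcbz instructions are recognized again on the trap path;
 * kvmppc_exit_pr_progint() masks the faulting instruction with 0xfffffff7
 * and compares against INS_DCBZ before deciding to emulate it.
 *
 * kvmppc_visible_gpa() below checks whether a guest physical address is
 * backed by a memslot (or by the magic page used for paravirtualization);
 * anything else is treated as MMIO by the page fault handler.
 */
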
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

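/*
 * Resolve a guest ISI/DSI: translate the effective address through the
 * guest MMU (or fabricate a real-mode mapping), then either reflect the
 * fault back into the guest (-ENOENT/-EPERM/-EINVAL), map the page into
 * the shadow MMU, or hand the access to userspace as MMIO.
 */
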
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		u32 dsisr = vcpu->arch.fault_dsisr;
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			r = RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	}
	vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

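/*
 * Top-level exit handler: called with interrupts enabled after every
 * guest exit. Page faults are resolved or reflected, emulation traps are
 * forwarded to kvmppc_exit_pr_progint(), hypercalls are dispatched, and
 * anything the host cannot fix up is converted into a guest interrupt or
 * a userspace exit (r carries the RESUME_* disposition).
 */
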
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			msr |= shadow_srr1 & 0x58000000;
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_set_dar(vcpu, dar);
			kvmppc_set_dsisr(vcpu, fault_dsisr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		r = RESUME_GUEST;
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason.
		 */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
#ifdef CONFIG_PPC_BOOK3S_64
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		/* Flush all SLB entries */
		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
		vcpu->arch.mmu.slbia(vcpu);

		for (i = 0; i < 64; i++) {
			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
			u64 rs = sregs->u.s.ppc64.slb[i].slbv;

			if (rb & SLB_ESID_V)
				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
		}
	} else
#endif
	{
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

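/*
 * vcpu creation allocates, in order: the vcpu itself, the book3s struct,
 * the 32-bit shadow vcpu (if configured) and the page shared with the
 * guest; the error path below unwinds those allocations in reverse order.
 */
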
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		i++;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

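/*
 * On pseries hosts, "relocation on exceptions" must be disabled globally
 * while any PR guest exists (see init_vm/destroy_vm below); this counter
 * tracks how many VMs currently require that.
 */
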
static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * Disable KVM for Power9 until the required bits are merged.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load   = kvmppc_core_vcpu_load_pr,
	.vcpu_put    = kvmppc_core_vcpu_put_pr,
	.set_msr     = kvmppc_set_msr_pr,
	.vcpu_run    = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free   = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva  = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy  = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
#endif
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif