2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 * Copyright IBM Corp. 2007
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/kvm_host.h>
24 #include <linux/vmalloc.h>
25 #include <linux/hrtimer.h>
26 #include <linux/sched/signal.h>
28 #include <linux/slab.h>
29 #include <linux/file.h>
30 #include <linux/module.h>
31 #include <linux/irqbypass.h>
32 #include <linux/kvm_irqfd.h>
33 #include <asm/cputable.h>
34 #include <linux/uaccess.h>
35 #include <asm/kvm_ppc.h>
36 #include <asm/tlbflush.h>
37 #include <asm/cputhreads.h>
38 #include <asm/irqflags.h>
39 #include <asm/iommu.h>
40 #include <asm/switch_to.h>
45 #include "../mm/mmu_decl.h"
47 #define CREATE_TRACE_POINTS
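/*
 * Backend ops tables.  The HV and PR style implementations (either of which
 * may be built as a module) register themselves here, and kvm_arch_init_vm()
 * below picks one of them for each VM.
 */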
50 struct kvmppc_ops *kvmppc_hv_ops;
51 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
52 struct kvmppc_ops *kvmppc_pr_ops;
53 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
56 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
58 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
61 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
66 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
72 * Common checks before entering the guest world. Call with interrupts enabled.
77 * == 1 if we're ready to go into guest state
78 * <= 0 if we need to go back to the host with return value
80 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
84 WARN_ON(irqs_disabled());
95 if (signal_pending(current)) {
96 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
97 vcpu->run->exit_reason = KVM_EXIT_INTR;
102 vcpu->mode = IN_GUEST_MODE;
105 * Reading vcpu->requests must happen after setting vcpu->mode,
106 * so we don't miss a request because the requester sees
107 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
108 * before next entering the guest (and thus doesn't IPI).
109 * This also orders the write to mode from any reads
110 * to the page tables done while the VCPU is running.
111 * Please see the comment in kvm_flush_remote_tlbs.
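*
* The requester side mirrors this: kvm_make_request() sets the request bit
* first, and kvm_vcpu_kick() only sends an IPI if it then still observes
* the vcpu in IN_GUEST_MODE.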
115 if (kvm_request_pending(vcpu)) {
116 /* Make sure we process requests while preemptible */
118 trace_kvm_check_requests(vcpu);
119 r = kvmppc_core_check_requests(vcpu);
126 if (kvmppc_core_prepare_to_enter(vcpu)) {
127 /* interrupts got enabled in between, so we
128 are back at square 1 */
132 guest_enter_irqoff();
140 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
142 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
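/*
 * Byte-swap every field of the shared (magic) page.  Called when the guest's
 * endianness no longer matches the recorded one, so the fields keep the
 * guest's byte order.
 */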
143 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
145 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
148 shared->sprg0 = swab64(shared->sprg0);
149 shared->sprg1 = swab64(shared->sprg1);
150 shared->sprg2 = swab64(shared->sprg2);
151 shared->sprg3 = swab64(shared->sprg3);
152 shared->srr0 = swab64(shared->srr0);
153 shared->srr1 = swab64(shared->srr1);
154 shared->dar = swab64(shared->dar);
155 shared->msr = swab64(shared->msr);
156 shared->dsisr = swab32(shared->dsisr);
157 shared->int_pending = swab32(shared->int_pending);
158 for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
159 shared->sr[i] = swab32(shared->sr[i]);
163 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
165 int nr = kvmppc_get_gpr(vcpu, 11);
167 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
168 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
169 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
170 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
171 unsigned long r2 = 0;
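/*
 * The hypercall number arrives in r11 and the arguments in r3..r6; a second
 * return value is passed back to the guest in r4 (set from r2 at the end of
 * this function).
 */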
173 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
175 param1 &= 0xffffffff;
176 param2 &= 0xffffffff;
177 param3 &= 0xffffffff;
178 param4 &= 0xffffffff;
182 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
184 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
185 /* Book3S guests can be little endian; work out the guest's endianness here */
186 int shared_big_endian = true;
187 if (vcpu->arch.intr_msr & MSR_LE)
188 shared_big_endian = false;
189 if (shared_big_endian != vcpu->arch.shared_big_endian)
190 kvmppc_swab_shared(vcpu);
191 vcpu->arch.shared_big_endian = shared_big_endian;
194 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
196 * Older versions of the Linux magic page code had
197 * a bug where they would map their trampoline code
198 * NX. If that's the case, remove !PR NX capability.
200 vcpu->arch.disable_kernel_nx = true;
201 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
204 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
205 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
207 #ifdef CONFIG_PPC_64K_PAGES
209 * Make sure our 4k magic page sits at the same offset within a 64k
210 * page in both the guest mapping and the host's shared page.
212 if ((vcpu->arch.magic_page_pa & 0xf000) !=
213 ((ulong)vcpu->arch.shared & 0xf000)) {
214 void *old_shared = vcpu->arch.shared;
215 ulong shared = (ulong)vcpu->arch.shared;
219 shared |= vcpu->arch.magic_page_pa & 0xf000;
220 new_shared = (void*)shared;
221 memcpy(new_shared, old_shared, 0x1000);
222 vcpu->arch.shared = new_shared;
226 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
231 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
233 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
234 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
237 /* Second return value is in r4 */
239 case EV_HCALL_TOKEN(EV_IDLE):
241 kvm_vcpu_block(vcpu);
242 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
245 r = EV_UNIMPLEMENTED;
249 kvmppc_set_gpr(vcpu, 4, r2);
253 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
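/*
 * Late sanity check of the vcpu configuration: returns 0 when the combination
 * of CPU type, PAPR mode and HV/PR backend is usable, -EINVAL otherwise.
 * Re-run e.g. after a capability is enabled (see kvm_vcpu_ioctl_enable_cap()
 * below).
 */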
255 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
259 /* We have to know what CPU to virtualize */
263 /* PAPR only works with book3s_64 */
264 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
267 /* HV KVM can only do PAPR mode for now */
268 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
271 #ifdef CONFIG_KVM_BOOKE_HV
272 if (!cpu_has_feature(CPU_FTR_EMB_HV))
280 return r ? 0 : -EINVAL;
282 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
284 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
286 enum emulation_result er;
289 er = kvmppc_emulate_loadstore(vcpu);
292 /* Future optimization: only reload non-volatiles if they were
293 * actually modified. */
299 case EMULATE_DO_MMIO:
300 run->exit_reason = KVM_EXIT_MMIO;
301 /* We must reload nonvolatiles because "update" load/store
302 * instructions modify register state. */
303 /* Future optimization: only reload non-volatiles if they were
304 * actually modified. */
311 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
312 /* XXX Deliver Program interrupt to guest. */
313 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
324 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
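/*
 * kvmppc_st()/kvmppc_ld() store to / load from a guest effective address:
 * translate the address, honour the magic-page override, and fall back to
 * EMULATE_DO_MMIO when the target is not backed by guest memory.
 */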
326 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
329 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
330 struct kvmppc_pte pte;
335 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
345 /* Magic page override */
346 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
347 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
348 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
349 void *magic = vcpu->arch.shared;
350 magic += pte.eaddr & 0xfff;
351 memcpy(magic, ptr, size);
355 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
356 return EMULATE_DO_MMIO;
360 EXPORT_SYMBOL_GPL(kvmppc_st);
362 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
365 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
366 struct kvmppc_pte pte;
371 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
381 if (!data && !pte.may_execute)
384 /* Magic page override */
385 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
386 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
387 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
388 void *magic = vcpu->arch.shared;
389 magic += pte.eaddr & 0xfff;
390 memcpy(ptr, magic, size);
394 if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
395 return EMULATE_DO_MMIO;
399 EXPORT_SYMBOL_GPL(kvmppc_ld);
401 int kvm_arch_hardware_enable(void)
406 int kvm_arch_hardware_setup(void)
411 void kvm_arch_check_processor_compat(void *rtn)
413 *(int *)rtn = kvmppc_core_check_processor_compat();
416 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
418 struct kvmppc_ops *kvm_ops = NULL;
420 * if we have both HV and PR enabled, default is HV
424 kvm_ops = kvmppc_hv_ops;
426 kvm_ops = kvmppc_pr_ops;
429 } else if (type == KVM_VM_PPC_HV) {
432 kvm_ops = kvmppc_hv_ops;
433 } else if (type == KVM_VM_PPC_PR) {
436 kvm_ops = kvmppc_pr_ops;
440 if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
443 kvm->arch.kvm_ops = kvm_ops;
444 return kvmppc_core_init_vm(kvm);
449 bool kvm_arch_has_vcpu_debugfs(void)
454 int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
459 void kvm_arch_destroy_vm(struct kvm *kvm)
462 struct kvm_vcpu *vcpu;
464 #ifdef CONFIG_KVM_XICS
466 * We call kick_all_cpus_sync() to ensure that all
467 * CPUs have executed any pending IPIs before we
468 * continue and free VCPUs structures below.
470 if (is_kvmppc_hv_enabled(kvm))
471 kick_all_cpus_sync();
474 kvm_for_each_vcpu(i, vcpu, kvm)
475 kvm_arch_vcpu_free(vcpu);
477 mutex_lock(&kvm->lock);
478 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
479 kvm->vcpus[i] = NULL;
481 atomic_set(&kvm->online_vcpus, 0);
483 kvmppc_core_destroy_vm(kvm);
485 mutex_unlock(&kvm->lock);
487 /* drop the module reference */
488 module_put(kvm->arch.kvm_ops->owner);
491 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
494 /* Assume we're using HV mode when the HV module is loaded */
495 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
499 * Hooray - we know which VM type we're running on. Depend on
500 * that rather than the guess above.
502 hv_enabled = is_kvmppc_hv_enabled(kvm);
507 case KVM_CAP_PPC_BOOKE_SREGS:
508 case KVM_CAP_PPC_BOOKE_WATCHDOG:
509 case KVM_CAP_PPC_EPR:
511 case KVM_CAP_PPC_SEGSTATE:
512 case KVM_CAP_PPC_HIOR:
513 case KVM_CAP_PPC_PAPR:
515 case KVM_CAP_PPC_UNSET_IRQ:
516 case KVM_CAP_PPC_IRQ_LEVEL:
517 case KVM_CAP_ENABLE_CAP:
518 case KVM_CAP_ENABLE_CAP_VM:
519 case KVM_CAP_ONE_REG:
520 case KVM_CAP_IOEVENTFD:
521 case KVM_CAP_DEVICE_CTRL:
522 case KVM_CAP_IMMEDIATE_EXIT:
525 case KVM_CAP_PPC_PAIRED_SINGLES:
526 case KVM_CAP_PPC_OSI:
527 case KVM_CAP_PPC_GET_PVINFO:
528 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
531 /* We support this only for PR */
534 #ifdef CONFIG_KVM_MPIC
535 case KVM_CAP_IRQ_MPIC:
540 #ifdef CONFIG_PPC_BOOK3S_64
541 case KVM_CAP_SPAPR_TCE:
542 case KVM_CAP_SPAPR_TCE_64:
544 case KVM_CAP_SPAPR_TCE_VFIO:
545 case KVM_CAP_PPC_RTAS:
546 case KVM_CAP_PPC_FIXUP_HCALL:
547 case KVM_CAP_PPC_ENABLE_HCALL:
548 #ifdef CONFIG_KVM_XICS
549 case KVM_CAP_IRQ_XICS:
554 case KVM_CAP_PPC_ALLOC_HTAB:
557 #endif /* CONFIG_PPC_BOOK3S_64 */
558 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
559 case KVM_CAP_PPC_SMT:
562 if (kvm->arch.emul_smt_mode > 1)
563 r = kvm->arch.emul_smt_mode;
565 r = kvm->arch.smt_mode;
566 } else if (hv_enabled) {
567 if (cpu_has_feature(CPU_FTR_ARCH_300))
570 r = threads_per_subcore;
573 case KVM_CAP_PPC_SMT_POSSIBLE:
576 if (!cpu_has_feature(CPU_FTR_ARCH_300))
577 r = ((threads_per_subcore << 1) - 1);
579 /* P9 can emulate dbells, so allow any mode */
583 case KVM_CAP_PPC_RMA:
586 case KVM_CAP_PPC_HWRNG:
587 r = kvmppc_hwrng_present();
589 case KVM_CAP_PPC_MMU_RADIX:
590 r = !!(hv_enabled && radix_enabled());
592 case KVM_CAP_PPC_MMU_HASH_V3:
593 r = !!(hv_enabled && !radix_enabled() &&
594 cpu_has_feature(CPU_FTR_ARCH_300));
597 case KVM_CAP_SYNC_MMU:
598 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
600 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
606 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
607 case KVM_CAP_PPC_HTAB_FD:
611 case KVM_CAP_NR_VCPUS:
613 * Recommending a number of CPUs is somewhat arbitrary; we
614 * return the number of present CPUs for -HV (since a host
615 * will have secondary threads "offline"), and for other KVM
616 * implementations just count online CPUs.
619 r = num_present_cpus();
621 r = num_online_cpus();
623 case KVM_CAP_NR_MEMSLOTS:
624 r = KVM_USER_MEM_SLOTS;
626 case KVM_CAP_MAX_VCPUS:
629 #ifdef CONFIG_PPC_BOOK3S_64
630 case KVM_CAP_PPC_GET_SMMU_INFO:
633 case KVM_CAP_SPAPR_MULTITCE:
636 case KVM_CAP_SPAPR_RESIZE_HPT:
637 /* Disable this on POWER9 until code handles new HPTE format */
638 r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
641 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
642 case KVM_CAP_PPC_FWNMI:
646 case KVM_CAP_PPC_HTM:
648 (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
658 long kvm_arch_dev_ioctl(struct file *filp,
659 unsigned int ioctl, unsigned long arg)
664 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
665 struct kvm_memory_slot *dont)
667 kvmppc_core_free_memslot(kvm, free, dont);
670 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
671 unsigned long npages)
673 return kvmppc_core_create_memslot(kvm, slot, npages);
676 int kvm_arch_prepare_memory_region(struct kvm *kvm,
677 struct kvm_memory_slot *memslot,
678 const struct kvm_userspace_memory_region *mem,
679 enum kvm_mr_change change)
681 return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
684 void kvm_arch_commit_memory_region(struct kvm *kvm,
685 const struct kvm_userspace_memory_region *mem,
686 const struct kvm_memory_slot *old,
687 const struct kvm_memory_slot *new,
688 enum kvm_mr_change change)
690 kvmppc_core_commit_memory_region(kvm, mem, old, new);
693 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
694 struct kvm_memory_slot *slot)
696 kvmppc_core_flush_memslot(kvm, slot);
699 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
701 struct kvm_vcpu *vcpu;
702 vcpu = kvmppc_core_vcpu_create(kvm, id);
704 vcpu->arch.wqp = &vcpu->wq;
705 kvmppc_create_vcpu_debugfs(vcpu, id);
710 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
714 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
716 /* Make sure we're not using the vcpu anymore */
717 hrtimer_cancel(&vcpu->arch.dec_timer);
719 kvmppc_remove_vcpu_debugfs(vcpu);
721 switch (vcpu->arch.irq_type) {
722 case KVMPPC_IRQ_MPIC:
723 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
725 case KVMPPC_IRQ_XICS:
727 kvmppc_xive_cleanup_vcpu(vcpu);
729 kvmppc_xics_free_icp(vcpu);
733 kvmppc_core_vcpu_free(vcpu);
736 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
738 kvm_arch_vcpu_free(vcpu);
741 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
743 return kvmppc_core_pending_dec(vcpu);
746 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
748 struct kvm_vcpu *vcpu;
750 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
751 kvmppc_decrementer_func(vcpu);
753 return HRTIMER_NORESTART;
756 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
760 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
761 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
762 vcpu->arch.dec_expires = ~(u64)0;
764 #ifdef CONFIG_KVM_EXIT_TIMING
765 mutex_init(&vcpu->arch.exit_timing_lock);
767 ret = kvmppc_subarch_vcpu_init(vcpu);
771 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
773 kvmppc_mmu_destroy(vcpu);
774 kvmppc_subarch_vcpu_uninit(vcpu);
777 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
781 * vrsave (formerly usprg0) isn't used by Linux, but may
782 * be used by the guest.
784 * On non-booke this is associated with Altivec and
785 * is handled by code in book3s.c.
787 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
789 kvmppc_core_vcpu_load(vcpu, cpu);
792 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
794 kvmppc_core_vcpu_put(vcpu);
796 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
801 * irq_bypass_add_producer and irq_bypass_del_producer are only
802 * useful if the architecture supports PCI passthrough.
803 * irq_bypass_stop and irq_bypass_start are not needed and so
804 * kvm_ops are not defined for them.
806 bool kvm_arch_has_irq_bypass(void)
808 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
809 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
812 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
813 struct irq_bypass_producer *prod)
815 struct kvm_kernel_irqfd *irqfd =
816 container_of(cons, struct kvm_kernel_irqfd, consumer);
817 struct kvm *kvm = irqfd->kvm;
819 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
820 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
825 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
826 struct irq_bypass_producer *prod)
828 struct kvm_kernel_irqfd *irqfd =
829 container_of(cons, struct kvm_kernel_irqfd, consumer);
830 struct kvm *kvm = irqfd->kvm;
832 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
833 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
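/*
 * Helpers mapping an architected VSX doubleword/word index to the offset used
 * for the in-register image; they return -1 for an out-of-range index, which
 * the callers below treat as an emulation failure.
 */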
837 static inline int kvmppc_get_vsr_dword_offset(int index)
841 if ((index != 0) && (index != 1))
853 static inline int kvmppc_get_vsr_word_offset(int index)
857 if ((index > 3) || (index < 0))
868 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
871 union kvmppc_one_reg val;
872 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
873 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
878 if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
879 val.vval = VCPU_VSX_VR(vcpu, index);
880 val.vsxval[offset] = gpr;
881 VCPU_VSX_VR(vcpu, index) = val.vval;
883 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
887 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
890 union kvmppc_one_reg val;
891 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
893 if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
894 val.vval = VCPU_VSX_VR(vcpu, index);
897 VCPU_VSX_VR(vcpu, index) = val.vval;
899 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
900 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
904 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
907 union kvmppc_one_reg val;
908 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
909 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
910 int dword_offset, word_offset;
915 if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
916 val.vval = VCPU_VSX_VR(vcpu, index);
917 val.vsx32val[offset] = gpr32;
918 VCPU_VSX_VR(vcpu, index) = val.vval;
920 dword_offset = offset / 2;
921 word_offset = offset % 2;
922 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
923 val.vsx32val[word_offset] = gpr32;
924 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
927 #endif /* CONFIG_VSX */
929 #ifdef CONFIG_PPC_FPU
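/*
 * Single <-> double precision conversion done by the FPU itself: a PPC FPR
 * always holds the double format, so lfs/stfd converts single to double and
 * lfd/stfs converts back.  Without an FPU the value is passed through as is.
 */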
930 static inline u64 sp_to_dp(u32 fprs)
936 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
942 static inline u32 dp_to_sp(u64 fprd)
948 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
955 #define sp_to_dp(x) (x)
956 #define dp_to_sp(x) (x)
957 #endif /* CONFIG_PPC_FPU */
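/*
 * Complete an MMIO load once userspace has provided the data: byte-swap if
 * the host access was swabbed, widen single precision and sign-extend as
 * requested, then write the result into the GPR/FPR/QPR/VSX destination
 * recorded in vcpu->arch.io_gpr.
 */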
959 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
962 u64 uninitialized_var(gpr);
964 if (run->mmio.len > sizeof(gpr)) {
965 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
969 if (!vcpu->arch.mmio_host_swabbed) {
970 switch (run->mmio.len) {
971 case 8: gpr = *(u64 *)run->mmio.data; break;
972 case 4: gpr = *(u32 *)run->mmio.data; break;
973 case 2: gpr = *(u16 *)run->mmio.data; break;
974 case 1: gpr = *(u8 *)run->mmio.data; break;
977 switch (run->mmio.len) {
978 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
979 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
980 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
981 case 1: gpr = *(u8 *)run->mmio.data; break;
985 /* conversion between single and double precision */
986 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
989 if (vcpu->arch.mmio_sign_extend) {
990 switch (run->mmio.len) {
1005 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1006 case KVM_MMIO_REG_GPR:
1007 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1009 case KVM_MMIO_REG_FPR:
1010 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1012 #ifdef CONFIG_PPC_BOOK3S
1013 case KVM_MMIO_REG_QPR:
1014 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1016 case KVM_MMIO_REG_FQPR:
1017 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1018 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1022 case KVM_MMIO_REG_VSX:
1023 if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
1024 kvmppc_set_vsr_dword(vcpu, gpr);
1025 else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
1026 kvmppc_set_vsr_word(vcpu, gpr);
1027 else if (vcpu->arch.mmio_vsx_copy_type ==
1028 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1029 kvmppc_set_vsr_dword_dump(vcpu, gpr);
1037 static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1038 unsigned int rt, unsigned int bytes,
1039 int is_default_endian, int sign_extend)
1044 /* Pity C doesn't have a logical XOR operator */
1045 if (kvmppc_need_byteswap(vcpu)) {
1046 host_swabbed = is_default_endian;
1048 host_swabbed = !is_default_endian;
1051 if (bytes > sizeof(run->mmio.data)) {
1052 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1056 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1057 run->mmio.len = bytes;
1058 run->mmio.is_write = 0;
1060 vcpu->arch.io_gpr = rt;
1061 vcpu->arch.mmio_host_swabbed = host_swabbed;
1062 vcpu->mmio_needed = 1;
1063 vcpu->mmio_is_write = 0;
1064 vcpu->arch.mmio_sign_extend = sign_extend;
1066 idx = srcu_read_lock(&vcpu->kvm->srcu);
1068 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1069 bytes, &run->mmio.data);
1071 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1074 kvmppc_complete_mmio_load(vcpu, run);
1075 vcpu->mmio_needed = 0;
1076 return EMULATE_DONE;
1079 return EMULATE_DO_MMIO;
1082 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1083 unsigned int rt, unsigned int bytes,
1084 int is_default_endian)
1086 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
1088 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1090 /* Same as above, but sign extends */
1091 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
1092 unsigned int rt, unsigned int bytes,
1093 int is_default_endian)
1095 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
1099 int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1100 unsigned int rt, unsigned int bytes,
1101 int is_default_endian, int mmio_sign_extend)
1103 enum emulation_result emulated = EMULATE_DONE;
1105 /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1106 if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
1107 (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
1108 return EMULATE_FAIL;
1111 while (vcpu->arch.mmio_vsx_copy_nums) {
1112 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1113 is_default_endian, mmio_sign_extend);
1115 if (emulated != EMULATE_DONE)
1118 vcpu->arch.paddr_accessed += run->mmio.len;
1120 vcpu->arch.mmio_vsx_copy_nums--;
1121 vcpu->arch.mmio_vsx_offset++;
1125 #endif /* CONFIG_VSX */
1127 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1128 u64 val, unsigned int bytes, int is_default_endian)
1130 void *data = run->mmio.data;
1134 /* Pity C doesn't have a logical XOR operator */
1135 if (kvmppc_need_byteswap(vcpu)) {
1136 host_swabbed = is_default_endian;
1138 host_swabbed = !is_default_endian;
1141 if (bytes > sizeof(run->mmio.data)) {
1142 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1146 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1147 run->mmio.len = bytes;
1148 run->mmio.is_write = 1;
1149 vcpu->mmio_needed = 1;
1150 vcpu->mmio_is_write = 1;
1152 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1153 val = dp_to_sp(val);
1155 /* Store the value at the lowest bytes in 'data'. */
1156 if (!host_swabbed) {
1158 case 8: *(u64 *)data = val; break;
1159 case 4: *(u32 *)data = val; break;
1160 case 2: *(u16 *)data = val; break;
1161 case 1: *(u8 *)data = val; break;
1165 case 8: *(u64 *)data = swab64(val); break;
1166 case 4: *(u32 *)data = swab32(val); break;
1167 case 2: *(u16 *)data = swab16(val); break;
1168 case 1: *(u8 *)data = val; break;
1172 idx = srcu_read_lock(&vcpu->kvm->srcu);
1174 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1175 bytes, &run->mmio.data);
1177 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1180 vcpu->mmio_needed = 0;
1181 return EMULATE_DONE;
1184 return EMULATE_DO_MMIO;
1186 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
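/*
 * Fetch the next doubleword or word to be stored from the source VSX
 * register, according to mmio_vsx_copy_type and the current mmio_vsx_offset;
 * returns -1 for an invalid offset so the caller can fail the emulation.
 */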
1189 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1191 u32 dword_offset, word_offset;
1192 union kvmppc_one_reg reg;
1194 int copy_type = vcpu->arch.mmio_vsx_copy_type;
1197 switch (copy_type) {
1198 case KVMPPC_VSX_COPY_DWORD:
1200 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1202 if (vsx_offset == -1) {
1207 if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
1208 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1210 reg.vval = VCPU_VSX_VR(vcpu, rs);
1211 *val = reg.vsxval[vsx_offset];
1215 case KVMPPC_VSX_COPY_WORD:
1217 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1219 if (vsx_offset == -1) {
1224 if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
1225 dword_offset = vsx_offset / 2;
1226 word_offset = vsx_offset % 2;
1227 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1228 *val = reg.vsx32val[word_offset];
1230 reg.vval = VCPU_VSX_VR(vcpu, rs);
1231 *val = reg.vsx32val[vsx_offset];
1243 int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1244 int rs, unsigned int bytes, int is_default_endian)
1247 enum emulation_result emulated = EMULATE_DONE;
1249 vcpu->arch.io_gpr = rs;
1251 /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1252 if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
1253 (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
1254 return EMULATE_FAIL;
1257 while (vcpu->arch.mmio_vsx_copy_nums) {
1258 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1259 return EMULATE_FAIL;
1261 emulated = kvmppc_handle_store(run, vcpu,
1262 val, bytes, is_default_endian);
1264 if (emulated != EMULATE_DONE)
1267 vcpu->arch.paddr_accessed += run->mmio.len;
1269 vcpu->arch.mmio_vsx_copy_nums--;
1270 vcpu->arch.mmio_vsx_offset++;
1276 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
1277 struct kvm_run *run)
1279 enum emulation_result emulated = EMULATE_FAIL;
1282 vcpu->arch.paddr_accessed += run->mmio.len;
1284 if (!vcpu->mmio_is_write) {
1285 emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
1286 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1288 emulated = kvmppc_handle_vsx_store(run, vcpu,
1289 vcpu->arch.io_gpr, run->mmio.len, 1);
1293 case EMULATE_DO_MMIO:
1294 run->exit_reason = KVM_EXIT_MMIO;
1298 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1299 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1300 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1309 #endif /* CONFIG_VSX */
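/*
 * ONE_REG accessors: the register size is encoded in reg->id and the value is
 * copied to or from the user buffer at reg->addr; registers the generic
 * kvmppc_get/set_one_reg() helpers don't cover (Altivec etc.) are handled
 * here.  Userspace usage, roughly (vcpu_fd and val are illustrative):
 *
 *	struct kvm_one_reg reg = { .id = KVM_REG_PPC_VRSAVE,
 *				   .addr = (__u64)(uintptr_t)&val };
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */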
1311 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1314 union kvmppc_one_reg val;
1317 size = one_reg_size(reg->id);
1318 if (size > sizeof(val))
1321 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1325 #ifdef CONFIG_ALTIVEC
1326 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1327 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1331 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1333 case KVM_REG_PPC_VSCR:
1334 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1338 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1340 case KVM_REG_PPC_VRSAVE:
1341 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1343 #endif /* CONFIG_ALTIVEC */
1353 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1359 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1362 union kvmppc_one_reg val;
1365 size = one_reg_size(reg->id);
1366 if (size > sizeof(val))
1369 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1372 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1376 #ifdef CONFIG_ALTIVEC
1377 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1378 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1382 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1384 case KVM_REG_PPC_VSCR:
1385 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1389 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1391 case KVM_REG_PPC_VRSAVE:
1392 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1396 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1398 #endif /* CONFIG_ALTIVEC */
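/*
 * Main vcpu run loop entry.  When userspace re-enters after an exit, first
 * complete whatever was left pending (MMIO load data, repeated VSX copies,
 * OSI gprs, PAPR hypercall return values, EPR), then enter the guest.
 */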
1408 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
1413 if (vcpu->mmio_needed) {
1414 vcpu->mmio_needed = 0;
1415 if (!vcpu->mmio_is_write)
1416 kvmppc_complete_mmio_load(vcpu, run);
1418 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1419 vcpu->arch.mmio_vsx_copy_nums--;
1420 vcpu->arch.mmio_vsx_offset++;
1423 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1424 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
1425 if (r == RESUME_HOST) {
1426 vcpu->mmio_needed = 1;
1431 } else if (vcpu->arch.osi_needed) {
1432 u64 *gprs = run->osi.gprs;
1435 for (i = 0; i < 32; i++)
1436 kvmppc_set_gpr(vcpu, i, gprs[i]);
1437 vcpu->arch.osi_needed = 0;
1438 } else if (vcpu->arch.hcall_needed) {
1441 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1442 for (i = 0; i < 9; ++i)
1443 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1444 vcpu->arch.hcall_needed = 0;
1446 } else if (vcpu->arch.epr_needed) {
1447 kvmppc_set_epr(vcpu, run->epr.epr);
1448 vcpu->arch.epr_needed = 0;
1452 if (vcpu->sigset_active)
1453 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1455 if (run->immediate_exit)
1458 r = kvmppc_vcpu_run(run, vcpu);
1460 if (vcpu->sigset_active)
1461 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1466 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1468 if (irq->irq == KVM_INTERRUPT_UNSET) {
1469 kvmppc_core_dequeue_external(vcpu);
1473 kvmppc_core_queue_external(vcpu, irq);
1475 kvm_vcpu_kick(vcpu);
1480 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1481 struct kvm_enable_cap *cap)
1489 case KVM_CAP_PPC_OSI:
1491 vcpu->arch.osi_enabled = true;
1493 case KVM_CAP_PPC_PAPR:
1495 vcpu->arch.papr_enabled = true;
1497 case KVM_CAP_PPC_EPR:
1500 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1502 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1505 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1507 vcpu->arch.watchdog_enabled = true;
1510 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1511 case KVM_CAP_SW_TLB: {
1512 struct kvm_config_tlb cfg;
1513 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1516 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1519 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1523 #ifdef CONFIG_KVM_MPIC
1524 case KVM_CAP_IRQ_MPIC: {
1526 struct kvm_device *dev;
1529 f = fdget(cap->args[0]);
1534 dev = kvm_device_from_filp(f.file);
1536 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1542 #ifdef CONFIG_KVM_XICS
1543 case KVM_CAP_IRQ_XICS: {
1545 struct kvm_device *dev;
1548 f = fdget(cap->args[0]);
1553 dev = kvm_device_from_filp(f.file);
1556 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1558 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1564 #endif /* CONFIG_KVM_XICS */
1565 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1566 case KVM_CAP_PPC_FWNMI:
1568 if (!is_kvmppc_hv_enabled(vcpu->kvm))
1571 vcpu->kvm->arch.fwnmi_enabled = true;
1573 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1580 r = kvmppc_sanity_check(vcpu);
1585 bool kvm_arch_intc_initialized(struct kvm *kvm)
1587 #ifdef CONFIG_KVM_MPIC
1591 #ifdef CONFIG_KVM_XICS
1592 if (kvm->arch.xics || kvm->arch.xive)
1598 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1599 struct kvm_mp_state *mp_state)
1604 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1605 struct kvm_mp_state *mp_state)
1610 long kvm_arch_vcpu_ioctl(struct file *filp,
1611 unsigned int ioctl, unsigned long arg)
1613 struct kvm_vcpu *vcpu = filp->private_data;
1614 void __user *argp = (void __user *)arg;
1618 case KVM_INTERRUPT: {
1619 struct kvm_interrupt irq;
1621 if (copy_from_user(&irq, argp, sizeof(irq)))
1623 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1627 case KVM_ENABLE_CAP:
1629 struct kvm_enable_cap cap;
1631 if (copy_from_user(&cap, argp, sizeof(cap)))
1633 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1637 case KVM_SET_ONE_REG:
1638 case KVM_GET_ONE_REG:
1640 struct kvm_one_reg reg;
1642 if (copy_from_user(&reg, argp, sizeof(reg)))
1644 if (ioctl == KVM_SET_ONE_REG)
1645 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
1647 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
1651 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1652 case KVM_DIRTY_TLB: {
1653 struct kvm_dirty_tlb dirty;
1655 if (copy_from_user(&dirty, argp, sizeof(dirty)))
1657 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
1669 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1671 return VM_FAULT_SIGBUS;
1674 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
1676 u32 inst_nop = 0x60000000;
1677 #ifdef CONFIG_KVM_BOOKE_HV
1678 u32 inst_sc1 = 0x44000022;
1679 pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
1680 pvinfo->hcall[1] = cpu_to_be32(inst_nop);
1681 pvinfo->hcall[2] = cpu_to_be32(inst_nop);
1682 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
1684 u32 inst_lis = 0x3c000000;
1685 u32 inst_ori = 0x60000000;
1686 u32 inst_sc = 0x44000002;
1687 u32 inst_imm_mask = 0xffff;
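/*
 * 0x3c000000 is "addis r0,0,0" (i.e. lis r0,0), 0x60000000 is "ori r0,r0,0"
 * (which is also the canonical nop) and 0x44000002 is "sc"; the 16-bit halves
 * of KVM_SC_MAGIC_R0 are OR'd into the immediate fields below.
 */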
1690 * The hypercall to get into KVM from within guest context is as follows:
1693 * lis r0, KVM_SC_MAGIC_R0@h
1694 * ori r0, r0, KVM_SC_MAGIC_R0@l
1698 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
1699 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
1700 pvinfo->hcall[2] = cpu_to_be32(inst_sc);
1701 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
1704 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
1709 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
1712 if (!irqchip_in_kernel(kvm))
1715 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1716 irq_event->irq, irq_event->level,
1722 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1723 struct kvm_enable_cap *cap)
1731 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1732 case KVM_CAP_PPC_ENABLE_HCALL: {
1733 unsigned long hcall = cap->args[0];
1736 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
1739 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
1742 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
1744 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
1748 case KVM_CAP_PPC_SMT: {
1749 unsigned long mode = cap->args[0];
1750 unsigned long flags = cap->args[1];
1753 if (kvm->arch.kvm_ops->set_smt_mode)
1754 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
1766 long kvm_arch_vm_ioctl(struct file *filp,
1767 unsigned int ioctl, unsigned long arg)
1769 struct kvm *kvm __maybe_unused = filp->private_data;
1770 void __user *argp = (void __user *)arg;
1774 case KVM_PPC_GET_PVINFO: {
1775 struct kvm_ppc_pvinfo pvinfo;
1776 memset(&pvinfo, 0, sizeof(pvinfo));
1777 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
1778 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
1785 case KVM_ENABLE_CAP:
1787 struct kvm_enable_cap cap;
1789 if (copy_from_user(&cap, argp, sizeof(cap)))
1791 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1794 #ifdef CONFIG_SPAPR_TCE_IOMMU
1795 case KVM_CREATE_SPAPR_TCE_64: {
1796 struct kvm_create_spapr_tce_64 create_tce_64;
1799 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
1801 if (create_tce_64.flags) {
1805 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
1808 case KVM_CREATE_SPAPR_TCE: {
1809 struct kvm_create_spapr_tce create_tce;
1810 struct kvm_create_spapr_tce_64 create_tce_64;
1813 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
1816 create_tce_64.liobn = create_tce.liobn;
1817 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
1818 create_tce_64.offset = 0;
1819 create_tce_64.size = create_tce.window_size >>
1820 IOMMU_PAGE_SHIFT_4K;
1821 create_tce_64.flags = 0;
1822 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
1826 #ifdef CONFIG_PPC_BOOK3S_64
1827 case KVM_PPC_GET_SMMU_INFO: {
1828 struct kvm_ppc_smmu_info info;
1829 struct kvm *kvm = filp->private_data;
1831 memset(&info, 0, sizeof(info));
1832 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
1833 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
1837 case KVM_PPC_RTAS_DEFINE_TOKEN: {
1838 struct kvm *kvm = filp->private_data;
1840 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
1843 case KVM_PPC_CONFIGURE_V3_MMU: {
1844 struct kvm *kvm = filp->private_data;
1845 struct kvm_ppc_mmuv3_cfg cfg;
1848 if (!kvm->arch.kvm_ops->configure_mmu)
1851 if (copy_from_user(&cfg, argp, sizeof(cfg)))
1853 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
1856 case KVM_PPC_GET_RMMU_INFO: {
1857 struct kvm *kvm = filp->private_data;
1858 struct kvm_ppc_rmmu_info info;
1861 if (!kvm->arch.kvm_ops->get_rmmu_info)
1863 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
1864 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
1869 struct kvm *kvm = filp->private_data;
1870 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
1872 #else /* CONFIG_PPC_BOOK3S_64 */
1881 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
1882 static unsigned long nr_lpids;
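/*
 * Simple bitmap allocator for LPID values: kvmppc_alloc_lpid() scans for a
 * free bit and retries if another caller races it on test_and_set_bit();
 * kvmppc_claim_lpid() marks a specific value as in use.
 */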
1884 long kvmppc_alloc_lpid(void)
1889 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
1890 if (lpid >= nr_lpids) {
1891 pr_err("%s: No LPIDs free\n", __func__);
1894 } while (test_and_set_bit(lpid, lpid_inuse));
1898 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
1900 void kvmppc_claim_lpid(long lpid)
1902 set_bit(lpid, lpid_inuse);
1904 EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
1906 void kvmppc_free_lpid(long lpid)
1908 clear_bit(lpid, lpid_inuse);
1910 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
1912 void kvmppc_init_lpid(unsigned long nr_lpids_param)
1914 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
1915 memset(lpid_inuse, 0, sizeof(lpid_inuse));
1917 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
1919 int kvm_arch_init(void *opaque)
1924 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);