// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/vmalloc.h>
14 #include <linux/hrtimer.h>
15 #include <linux/sched/signal.h>
17 #include <linux/slab.h>
18 #include <linux/file.h>
19 #include <linux/module.h>
20 #include <linux/irqbypass.h>
21 #include <linux/kvm_irqfd.h>
22 #include <asm/cputable.h>
23 #include <linux/uaccess.h>
24 #include <asm/kvm_ppc.h>
25 #include <asm/cputhreads.h>
26 #include <asm/irqflags.h>
27 #include <asm/iommu.h>
28 #include <asm/switch_to.h>
30 #ifdef CONFIG_PPC_PSERIES
31 #include <asm/hvcall.h>
32 #include <asm/plpar_wrappers.h>
34 #include <asm/ultravisor.h>
38 #include "../mm/mmu_decl.h"
40 #define CREATE_TRACE_POINTS
43 struct kvmppc_ops *kvmppc_hv_ops;
44 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
45 struct kvmppc_ops *kvmppc_pr_ops;
46 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
49 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
51 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
54 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
56 return kvm_arch_vcpu_runnable(vcpu);
59 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
64 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
/*
 * Common checks before entering the guest world. Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
78 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
82 WARN_ON(irqs_disabled());
93 if (signal_pending(current)) {
94 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
95 vcpu->run->exit_reason = KVM_EXIT_INTR;
100 vcpu->mode = IN_GUEST_MODE;
		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();
113 if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
116 trace_kvm_check_requests(vcpu);
117 r = kvmppc_core_check_requests(vcpu);
124 if (kvmppc_core_prepare_to_enter(vcpu)) {
125 /* interrupts got enabled in between, so we
126 are back at square 1 */
130 guest_enter_irqoff();
138 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
140 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
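/*
 * Byte-swap every field of the vcpu's shared (magic) page.  Used when a
 * Book3S PR guest switches endianness so that the contents of the shared
 * page stay consistent with the byte order the guest now expects.
 */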
141 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
143 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
146 shared->sprg0 = swab64(shared->sprg0);
147 shared->sprg1 = swab64(shared->sprg1);
148 shared->sprg2 = swab64(shared->sprg2);
149 shared->sprg3 = swab64(shared->sprg3);
150 shared->srr0 = swab64(shared->srr0);
151 shared->srr1 = swab64(shared->srr1);
152 shared->dar = swab64(shared->dar);
153 shared->msr = swab64(shared->msr);
154 shared->dsisr = swab32(shared->dsisr);
155 shared->int_pending = swab32(shared->int_pending);
156 for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
157 shared->sr[i] = swab32(shared->sr[i]);
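/*
 * Handle KVM paravirtual hypercalls: the hypercall number arrives in r11
 * and up to four parameters in r3-r6 (truncated to 32 bits for a 32-bit
 * guest).  Supported calls map the magic page, report the available
 * paravirt features and implement EV_IDLE; the result goes back in r3
 * with a second return value in r4.
 */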
161 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
163 int nr = kvmppc_get_gpr(vcpu, 11);
165 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
166 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
167 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
168 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
169 unsigned long r2 = 0;
171 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
173 param1 &= 0xffffffff;
174 param2 &= 0xffffffff;
175 param3 &= 0xffffffff;
176 param4 &= 0xffffffff;
180 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
182 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
183 /* Book3S can be little endian, find it out here */
184 int shared_big_endian = true;
185 if (vcpu->arch.intr_msr & MSR_LE)
186 shared_big_endian = false;
187 if (shared_big_endian != vcpu->arch.shared_big_endian)
188 kvmppc_swab_shared(vcpu);
189 vcpu->arch.shared_big_endian = shared_big_endian;
192 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
198 vcpu->arch.disable_kernel_nx = true;
199 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
202 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
203 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
205 #ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
210 if ((vcpu->arch.magic_page_pa & 0xf000) !=
211 ((ulong)vcpu->arch.shared & 0xf000)) {
212 void *old_shared = vcpu->arch.shared;
213 ulong shared = (ulong)vcpu->arch.shared;
217 shared |= vcpu->arch.magic_page_pa & 0xf000;
218 new_shared = (void*)shared;
219 memcpy(new_shared, old_shared, 0x1000);
220 vcpu->arch.shared = new_shared;
224 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
229 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
231 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
232 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
235 /* Second return value is in r4 */
237 case EV_HCALL_TOKEN(EV_IDLE):
240 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
243 r = EV_UNIMPLEMENTED;
247 kvmppc_set_gpr(vcpu, 4, r2);
251 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
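/*
 * Check that the vcpu's configuration is something this host can run:
 * the CPU type to virtualize must be known, PAPR mode is only valid on
 * Book3S-64, HV KVM requires PAPR mode, and BookE HV needs the embedded
 * hypervisor feature.  Returns 0 if the combination is sane, -EINVAL
 * otherwise.
 */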
253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
257 /* We have to know what CPU to virtualize */
261 /* PAPR only works with book3s_64 */
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
265 /* HV KVM can only do PAPR mode for now */
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
269 #ifdef CONFIG_KVM_BOOKE_HV
270 if (!cpu_has_feature(CPU_FTR_EMB_HV))
278 return r ? 0 : -EINVAL;
280 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
284 enum emulation_result er;
287 er = kvmppc_emulate_loadstore(vcpu);
290 /* Future optimization: only reload non-volatiles if they were
291 * actually modified. */
297 case EMULATE_DO_MMIO:
298 vcpu->run->exit_reason = KVM_EXIT_MMIO;
299 /* We must reload nonvolatiles because "update" load/store
300 * instructions modify register state. */
301 /* Future optimization: only reload non-volatiles if they were
302 * actually modified. */
309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
310 /* XXX Deliver Program interrupt to guest. */
311 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
322 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
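/*
 * kvmppc_st()/kvmppc_ld() copy data to or from a guest effective address.
 * They try the subarch store_to_eaddr/load_from_eaddr hooks first, then
 * translate the address with kvmppc_xlate(), honour the magic (shared)
 * page override, and finally fall back to kvm_write_guest()/
 * kvm_read_guest().  EMULATE_DO_MMIO is returned when the access cannot
 * be satisfied from guest memory.
 */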
324 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
327 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
328 struct kvmppc_pte pte;
333 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
334 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
337 if ((!r) || (r == -EAGAIN))
340 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
350 /* Magic page override */
351 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
352 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
353 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
354 void *magic = vcpu->arch.shared;
355 magic += pte.eaddr & 0xfff;
356 memcpy(magic, ptr, size);
360 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
361 return EMULATE_DO_MMIO;
365 EXPORT_SYMBOL_GPL(kvmppc_st);
367 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
370 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
371 struct kvmppc_pte pte;
376 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
377 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
380 if ((!rc) || (rc == -EAGAIN))
383 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
393 if (!data && !pte.may_execute)
396 /* Magic page override */
397 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
398 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
399 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
400 void *magic = vcpu->arch.shared;
401 magic += pte.eaddr & 0xfff;
402 memcpy(ptr, magic, size);
406 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
407 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
408 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
410 return EMULATE_DO_MMIO;
414 EXPORT_SYMBOL_GPL(kvmppc_ld);
416 int kvm_arch_hardware_enable(void)
421 int kvm_arch_hardware_setup(void *opaque)
426 int kvm_arch_check_processor_compat(void *opaque)
428 return kvmppc_core_check_processor_compat();
431 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
433 struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
439 kvm_ops = kvmppc_hv_ops;
441 kvm_ops = kvmppc_pr_ops;
444 } else if (type == KVM_VM_PPC_HV) {
447 kvm_ops = kvmppc_hv_ops;
448 } else if (type == KVM_VM_PPC_PR) {
451 kvm_ops = kvmppc_pr_ops;
455 if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
458 kvm->arch.kvm_ops = kvm_ops;
459 return kvmppc_core_init_vm(kvm);
464 void kvm_arch_destroy_vm(struct kvm *kvm)
466 #ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
472 if (is_kvmppc_hv_enabled(kvm))
473 kick_all_cpus_sync();
476 kvm_destroy_vcpus(kvm);
478 mutex_lock(&kvm->lock);
480 kvmppc_core_destroy_vm(kvm);
482 mutex_unlock(&kvm->lock);
484 /* drop the module reference */
485 module_put(kvm->arch.kvm_ops->owner);
488 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
491 /* Assume we're using HV mode when the HV module is loaded */
492 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
499 hv_enabled = is_kvmppc_hv_enabled(kvm);
504 case KVM_CAP_PPC_BOOKE_SREGS:
505 case KVM_CAP_PPC_BOOKE_WATCHDOG:
506 case KVM_CAP_PPC_EPR:
508 case KVM_CAP_PPC_SEGSTATE:
509 case KVM_CAP_PPC_HIOR:
510 case KVM_CAP_PPC_PAPR:
512 case KVM_CAP_PPC_UNSET_IRQ:
513 case KVM_CAP_PPC_IRQ_LEVEL:
514 case KVM_CAP_ENABLE_CAP:
515 case KVM_CAP_ONE_REG:
516 case KVM_CAP_IOEVENTFD:
517 case KVM_CAP_DEVICE_CTRL:
518 case KVM_CAP_IMMEDIATE_EXIT:
519 case KVM_CAP_SET_GUEST_DEBUG:
522 case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
523 case KVM_CAP_PPC_PAIRED_SINGLES:
524 case KVM_CAP_PPC_OSI:
525 case KVM_CAP_PPC_GET_PVINFO:
526 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
529 /* We support this only for PR */
532 #ifdef CONFIG_KVM_MPIC
533 case KVM_CAP_IRQ_MPIC:
538 #ifdef CONFIG_PPC_BOOK3S_64
539 case KVM_CAP_SPAPR_TCE:
540 case KVM_CAP_SPAPR_TCE_64:
543 case KVM_CAP_SPAPR_TCE_VFIO:
544 r = !!cpu_has_feature(CPU_FTR_HVMODE);
546 case KVM_CAP_PPC_RTAS:
547 case KVM_CAP_PPC_FIXUP_HCALL:
548 case KVM_CAP_PPC_ENABLE_HCALL:
549 #ifdef CONFIG_KVM_XICS
550 case KVM_CAP_IRQ_XICS:
552 case KVM_CAP_PPC_GET_CPU_CHAR:
555 #ifdef CONFIG_KVM_XIVE
556 case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
562 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
563 kvmppc_xive_native_supported();
567 case KVM_CAP_PPC_ALLOC_HTAB:
570 #endif /* CONFIG_PPC_BOOK3S_64 */
571 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
572 case KVM_CAP_PPC_SMT:
575 if (kvm->arch.emul_smt_mode > 1)
576 r = kvm->arch.emul_smt_mode;
578 r = kvm->arch.smt_mode;
579 } else if (hv_enabled) {
580 if (cpu_has_feature(CPU_FTR_ARCH_300))
583 r = threads_per_subcore;
586 case KVM_CAP_PPC_SMT_POSSIBLE:
589 if (!cpu_has_feature(CPU_FTR_ARCH_300))
590 r = ((threads_per_subcore << 1) - 1);
592 /* P9 can emulate dbells, so allow any mode */
596 case KVM_CAP_PPC_RMA:
599 case KVM_CAP_PPC_HWRNG:
600 r = kvmppc_hwrng_present();
602 case KVM_CAP_PPC_MMU_RADIX:
603 r = !!(hv_enabled && radix_enabled());
605 case KVM_CAP_PPC_MMU_HASH_V3:
606 r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
607 kvmppc_hv_ops->hash_v3_possible());
609 case KVM_CAP_PPC_NESTED_HV:
610 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
611 !kvmppc_hv_ops->enable_nested(NULL));
614 case KVM_CAP_SYNC_MMU:
615 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
617 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
623 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
624 case KVM_CAP_PPC_HTAB_FD:
628 case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
636 r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
638 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
640 case KVM_CAP_MAX_VCPUS:
643 case KVM_CAP_MAX_VCPU_ID:
644 r = KVM_MAX_VCPU_IDS;
646 #ifdef CONFIG_PPC_BOOK3S_64
647 case KVM_CAP_PPC_GET_SMMU_INFO:
650 case KVM_CAP_SPAPR_MULTITCE:
653 case KVM_CAP_SPAPR_RESIZE_HPT:
657 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
658 case KVM_CAP_PPC_FWNMI:
662 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
663 case KVM_CAP_PPC_HTM:
664 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
665 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
668 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
669 case KVM_CAP_PPC_SECURE_GUEST:
670 r = hv_enabled && kvmppc_hv_ops->enable_svm &&
671 !kvmppc_hv_ops->enable_svm(NULL);
673 case KVM_CAP_PPC_DAWR1:
674 r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
675 !kvmppc_hv_ops->enable_dawr1(NULL));
677 case KVM_CAP_PPC_RPT_INVALIDATE:
689 long kvm_arch_dev_ioctl(struct file *filp,
690 unsigned int ioctl, unsigned long arg)
695 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
697 kvmppc_core_free_memslot(kvm, slot);
700 int kvm_arch_prepare_memory_region(struct kvm *kvm,
701 const struct kvm_memory_slot *old,
702 struct kvm_memory_slot *new,
703 enum kvm_mr_change change)
705 return kvmppc_core_prepare_memory_region(kvm, old, new, change);
708 void kvm_arch_commit_memory_region(struct kvm *kvm,
709 struct kvm_memory_slot *old,
710 const struct kvm_memory_slot *new,
711 enum kvm_mr_change change)
713 kvmppc_core_commit_memory_region(kvm, old, new, change);
716 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
717 struct kvm_memory_slot *slot)
719 kvmppc_core_flush_memslot(kvm, slot);
722 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
727 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
729 struct kvm_vcpu *vcpu;
731 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
732 kvmppc_decrementer_func(vcpu);
734 return HRTIMER_NORESTART;
737 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
741 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
742 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
743 vcpu->arch.dec_expires = get_tb();
745 #ifdef CONFIG_KVM_EXIT_TIMING
746 mutex_init(&vcpu->arch.exit_timing_lock);
748 err = kvmppc_subarch_vcpu_init(vcpu);
752 err = kvmppc_core_vcpu_create(vcpu);
754 goto out_vcpu_uninit;
756 rcuwait_init(&vcpu->arch.wait);
757 vcpu->arch.waitp = &vcpu->arch.wait;
758 kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
762 kvmppc_subarch_vcpu_uninit(vcpu);
766 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
770 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
772 /* Make sure we're not using the vcpu anymore */
773 hrtimer_cancel(&vcpu->arch.dec_timer);
775 kvmppc_remove_vcpu_debugfs(vcpu);
777 switch (vcpu->arch.irq_type) {
778 case KVMPPC_IRQ_MPIC:
779 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
781 case KVMPPC_IRQ_XICS:
783 kvmppc_xive_cleanup_vcpu(vcpu);
785 kvmppc_xics_free_icp(vcpu);
787 case KVMPPC_IRQ_XIVE:
788 kvmppc_xive_native_cleanup_vcpu(vcpu);
792 kvmppc_core_vcpu_free(vcpu);
794 kvmppc_subarch_vcpu_uninit(vcpu);
797 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
799 return kvmppc_core_pending_dec(vcpu);
802 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
812 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
814 kvmppc_core_vcpu_load(vcpu, cpu);
817 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
819 kvmppc_core_vcpu_put(vcpu);
821 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
831 bool kvm_arch_has_irq_bypass(void)
833 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
834 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
837 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
838 struct irq_bypass_producer *prod)
840 struct kvm_kernel_irqfd *irqfd =
841 container_of(cons, struct kvm_kernel_irqfd, consumer);
842 struct kvm *kvm = irqfd->kvm;
844 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
845 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
850 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
851 struct irq_bypass_producer *prod)
853 struct kvm_kernel_irqfd *irqfd =
854 container_of(cons, struct kvm_kernel_irqfd, consumer);
855 struct kvm *kvm = irqfd->kvm;
857 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
858 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
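/*
 * MMIO emulation for vector (VSX/VMX) loads and stores.  The helpers
 * below translate a logical element index (doubleword, word, halfword or
 * byte) into the offset used for the in-register layout, taking byte
 * order into account, and copy MMIO data into or out of the right slice
 * of the FP/VSX/VMX register file.  An out-of-range index yields -1.
 */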
862 static inline int kvmppc_get_vsr_dword_offset(int index)
866 if ((index != 0) && (index != 1))
878 static inline int kvmppc_get_vsr_word_offset(int index)
882 if ((index > 3) || (index < 0))
893 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
896 union kvmppc_one_reg val;
897 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
898 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
904 val.vval = VCPU_VSX_VR(vcpu, index - 32);
905 val.vsxval[offset] = gpr;
906 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
908 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
912 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
915 union kvmppc_one_reg val;
916 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
919 val.vval = VCPU_VSX_VR(vcpu, index - 32);
922 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
924 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
925 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
929 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
932 union kvmppc_one_reg val;
933 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
936 val.vsx32val[0] = gpr;
937 val.vsx32val[1] = gpr;
938 val.vsx32val[2] = gpr;
939 val.vsx32val[3] = gpr;
940 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
942 val.vsx32val[0] = gpr;
943 val.vsx32val[1] = gpr;
944 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
945 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
949 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
952 union kvmppc_one_reg val;
953 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
954 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
955 int dword_offset, word_offset;
961 val.vval = VCPU_VSX_VR(vcpu, index - 32);
962 val.vsx32val[offset] = gpr32;
963 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
965 dword_offset = offset / 2;
966 word_offset = offset % 2;
967 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
968 val.vsx32val[word_offset] = gpr32;
969 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
972 #endif /* CONFIG_VSX */
974 #ifdef CONFIG_ALTIVEC
975 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
976 int index, int element_size)
979 int elts = sizeof(vector128)/element_size;
981 if ((index < 0) || (index >= elts))
984 if (kvmppc_need_byteswap(vcpu))
985 offset = elts - index - 1;
992 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
995 return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
998 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1001 return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1004 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1007 return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1010 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1013 return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1017 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1020 union kvmppc_one_reg val;
1021 int offset = kvmppc_get_vmx_dword_offset(vcpu,
1022 vcpu->arch.mmio_vmx_offset);
1023 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1028 val.vval = VCPU_VSX_VR(vcpu, index);
1029 val.vsxval[offset] = gpr;
1030 VCPU_VSX_VR(vcpu, index) = val.vval;
1033 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1036 union kvmppc_one_reg val;
1037 int offset = kvmppc_get_vmx_word_offset(vcpu,
1038 vcpu->arch.mmio_vmx_offset);
1039 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1044 val.vval = VCPU_VSX_VR(vcpu, index);
1045 val.vsx32val[offset] = gpr32;
1046 VCPU_VSX_VR(vcpu, index) = val.vval;
1049 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1052 union kvmppc_one_reg val;
1053 int offset = kvmppc_get_vmx_hword_offset(vcpu,
1054 vcpu->arch.mmio_vmx_offset);
1055 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1060 val.vval = VCPU_VSX_VR(vcpu, index);
1061 val.vsx16val[offset] = gpr16;
1062 VCPU_VSX_VR(vcpu, index) = val.vval;
1065 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1068 union kvmppc_one_reg val;
1069 int offset = kvmppc_get_vmx_byte_offset(vcpu,
1070 vcpu->arch.mmio_vmx_offset);
1071 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1076 val.vval = VCPU_VSX_VR(vcpu, index);
1077 val.vsx8val[offset] = gpr8;
1078 VCPU_VSX_VR(vcpu, index) = val.vval;
1080 #endif /* CONFIG_ALTIVEC */
1082 #ifdef CONFIG_PPC_FPU
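/*
 * Convert between single- and double-precision floating point images by
 * bouncing the value through fr0: lfs/lfd perform the format conversion
 * on load and stfd/stfs on store.  Without CONFIG_PPC_FPU the values are
 * passed through unchanged.
 */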
1083 static inline u64 sp_to_dp(u32 fprs)
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
	     : "fr0");
1095 static inline u32 dp_to_sp(u64 fprd)
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
	     : "fr0");
1108 #define sp_to_dp(x) (x)
1109 #define dp_to_sp(x) (x)
1110 #endif /* CONFIG_PPC_FPU */
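/*
 * Finish an emulated MMIO load: fetch the bytes that the in-kernel bus or
 * userspace placed in run->mmio.data, byte-swap them if host and guest
 * byte order differed, apply sign extension or single-to-double precision
 * conversion when requested, and deposit the result in the destination
 * GPR, FPR, QPR, VSX or VMX register recorded in vcpu->arch.io_gpr.
 */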
1112 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1114 struct kvm_run *run = vcpu->run;
1117 if (run->mmio.len > sizeof(gpr)) {
1118 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
1122 if (!vcpu->arch.mmio_host_swabbed) {
1123 switch (run->mmio.len) {
1124 case 8: gpr = *(u64 *)run->mmio.data; break;
1125 case 4: gpr = *(u32 *)run->mmio.data; break;
1126 case 2: gpr = *(u16 *)run->mmio.data; break;
1127 case 1: gpr = *(u8 *)run->mmio.data; break;
1130 switch (run->mmio.len) {
1131 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1132 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1133 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1134 case 1: gpr = *(u8 *)run->mmio.data; break;
1138 /* conversion between single and double precision */
1139 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1140 gpr = sp_to_dp(gpr);
1142 if (vcpu->arch.mmio_sign_extend) {
1143 switch (run->mmio.len) {
1146 gpr = (s64)(s32)gpr;
1150 gpr = (s64)(s16)gpr;
1158 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1159 case KVM_MMIO_REG_GPR:
1160 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1162 case KVM_MMIO_REG_FPR:
1163 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1164 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1166 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1168 #ifdef CONFIG_PPC_BOOK3S
1169 case KVM_MMIO_REG_QPR:
1170 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1172 case KVM_MMIO_REG_FQPR:
1173 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1174 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1178 case KVM_MMIO_REG_VSX:
1179 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1180 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1182 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1183 kvmppc_set_vsr_dword(vcpu, gpr);
1184 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1185 kvmppc_set_vsr_word(vcpu, gpr);
1186 else if (vcpu->arch.mmio_copy_type ==
1187 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1188 kvmppc_set_vsr_dword_dump(vcpu, gpr);
1189 else if (vcpu->arch.mmio_copy_type ==
1190 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1191 kvmppc_set_vsr_word_dump(vcpu, gpr);
1194 #ifdef CONFIG_ALTIVEC
1195 case KVM_MMIO_REG_VMX:
1196 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1197 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1199 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1200 kvmppc_set_vmx_dword(vcpu, gpr);
1201 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1202 kvmppc_set_vmx_word(vcpu, gpr);
1203 else if (vcpu->arch.mmio_copy_type ==
1204 KVMPPC_VMX_COPY_HWORD)
1205 kvmppc_set_vmx_hword(vcpu, gpr);
1206 else if (vcpu->arch.mmio_copy_type ==
1207 KVMPPC_VMX_COPY_BYTE)
1208 kvmppc_set_vmx_byte(vcpu, gpr);
1211 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1212 case KVM_MMIO_REG_NESTED_GPR:
1213 if (kvmppc_need_byteswap(vcpu))
1215 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
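/*
 * Common MMIO load path: record the access in vcpu->run->mmio along with
 * the endianness and sign-extension flags, then try the in-kernel MMIO
 * bus.  If the bus handles it the load is completed immediately and
 * EMULATE_DONE is returned; otherwise EMULATE_DO_MMIO tells the caller
 * to exit to userspace.
 */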
1224 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
1225 unsigned int rt, unsigned int bytes,
1226 int is_default_endian, int sign_extend)
1228 struct kvm_run *run = vcpu->run;
1232 /* Pity C doesn't have a logical XOR operator */
1233 if (kvmppc_need_byteswap(vcpu)) {
1234 host_swabbed = is_default_endian;
1236 host_swabbed = !is_default_endian;
1239 if (bytes > sizeof(run->mmio.data)) {
1240 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1244 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1245 run->mmio.len = bytes;
1246 run->mmio.is_write = 0;
1248 vcpu->arch.io_gpr = rt;
1249 vcpu->arch.mmio_host_swabbed = host_swabbed;
1250 vcpu->mmio_needed = 1;
1251 vcpu->mmio_is_write = 0;
1252 vcpu->arch.mmio_sign_extend = sign_extend;
1254 idx = srcu_read_lock(&vcpu->kvm->srcu);
1256 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1257 bytes, &run->mmio.data);
1259 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1262 kvmppc_complete_mmio_load(vcpu);
1263 vcpu->mmio_needed = 0;
1264 return EMULATE_DONE;
1267 return EMULATE_DO_MMIO;
1270 int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1271 unsigned int rt, unsigned int bytes,
1272 int is_default_endian)
1274 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1276 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1278 /* Same as above, but sign extends */
1279 int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
1280 unsigned int rt, unsigned int bytes,
1281 int is_default_endian)
1283 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
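/*
 * VSX loads may span several elements: mmio_vsx_copy_nums counts how many
 * remain and mmio_vsx_offset selects where the next one lands.  Each
 * iteration issues one basic load and advances paddr_accessed; the loop
 * stops early when a chunk has to be completed by userspace.
 */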
1287 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
1288 unsigned int rt, unsigned int bytes,
1289 int is_default_endian, int mmio_sign_extend)
1291 enum emulation_result emulated = EMULATE_DONE;
	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1294 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1295 return EMULATE_FAIL;
1297 while (vcpu->arch.mmio_vsx_copy_nums) {
1298 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1299 is_default_endian, mmio_sign_extend);
1301 if (emulated != EMULATE_DONE)
1304 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1306 vcpu->arch.mmio_vsx_copy_nums--;
1307 vcpu->arch.mmio_vsx_offset++;
1311 #endif /* CONFIG_VSX */
1313 int kvmppc_handle_store(struct kvm_vcpu *vcpu,
1314 u64 val, unsigned int bytes, int is_default_endian)
1316 struct kvm_run *run = vcpu->run;
1317 void *data = run->mmio.data;
1321 /* Pity C doesn't have a logical XOR operator */
1322 if (kvmppc_need_byteswap(vcpu)) {
1323 host_swabbed = is_default_endian;
1325 host_swabbed = !is_default_endian;
1328 if (bytes > sizeof(run->mmio.data)) {
1329 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1333 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1334 run->mmio.len = bytes;
1335 run->mmio.is_write = 1;
1336 vcpu->mmio_needed = 1;
1337 vcpu->mmio_is_write = 1;
1339 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1340 val = dp_to_sp(val);
1342 /* Store the value at the lowest bytes in 'data'. */
1343 if (!host_swabbed) {
1345 case 8: *(u64 *)data = val; break;
1346 case 4: *(u32 *)data = val; break;
1347 case 2: *(u16 *)data = val; break;
1348 case 1: *(u8 *)data = val; break;
1352 case 8: *(u64 *)data = swab64(val); break;
1353 case 4: *(u32 *)data = swab32(val); break;
1354 case 2: *(u16 *)data = swab16(val); break;
1355 case 1: *(u8 *)data = val; break;
1359 idx = srcu_read_lock(&vcpu->kvm->srcu);
1361 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1362 bytes, &run->mmio.data);
1364 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1367 vcpu->mmio_needed = 0;
1368 return EMULATE_DONE;
1371 return EMULATE_DO_MMIO;
1373 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1376 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1378 u32 dword_offset, word_offset;
1379 union kvmppc_one_reg reg;
1381 int copy_type = vcpu->arch.mmio_copy_type;
1384 switch (copy_type) {
1385 case KVMPPC_VSX_COPY_DWORD:
1387 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1389 if (vsx_offset == -1) {
1395 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1397 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1398 *val = reg.vsxval[vsx_offset];
1402 case KVMPPC_VSX_COPY_WORD:
1404 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1406 if (vsx_offset == -1) {
1412 dword_offset = vsx_offset / 2;
1413 word_offset = vsx_offset % 2;
1414 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1415 *val = reg.vsx32val[word_offset];
1417 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1418 *val = reg.vsx32val[vsx_offset];
1430 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
1431 int rs, unsigned int bytes, int is_default_endian)
1434 enum emulation_result emulated = EMULATE_DONE;
1436 vcpu->arch.io_gpr = rs;
	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1439 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1440 return EMULATE_FAIL;
1442 while (vcpu->arch.mmio_vsx_copy_nums) {
1443 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1444 return EMULATE_FAIL;
1446 emulated = kvmppc_handle_store(vcpu,
1447 val, bytes, is_default_endian);
1449 if (emulated != EMULATE_DONE)
1452 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1454 vcpu->arch.mmio_vsx_copy_nums--;
1455 vcpu->arch.mmio_vsx_offset++;
1461 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1463 struct kvm_run *run = vcpu->run;
1464 enum emulation_result emulated = EMULATE_FAIL;
1467 vcpu->arch.paddr_accessed += run->mmio.len;
1469 if (!vcpu->mmio_is_write) {
1470 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1471 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1473 emulated = kvmppc_handle_vsx_store(vcpu,
1474 vcpu->arch.io_gpr, run->mmio.len, 1);
1478 case EMULATE_DO_MMIO:
1479 run->exit_reason = KVM_EXIT_MMIO;
1483 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1484 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1485 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1494 #endif /* CONFIG_VSX */
1496 #ifdef CONFIG_ALTIVEC
1497 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1498 unsigned int rt, unsigned int bytes, int is_default_endian)
1500 enum emulation_result emulated = EMULATE_DONE;
	if (vcpu->arch.mmio_vmx_copy_nums > 2)
1503 return EMULATE_FAIL;
1505 while (vcpu->arch.mmio_vmx_copy_nums) {
1506 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1507 is_default_endian, 0);
1509 if (emulated != EMULATE_DONE)
1512 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1513 vcpu->arch.mmio_vmx_copy_nums--;
1514 vcpu->arch.mmio_vmx_offset++;
1520 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1522 union kvmppc_one_reg reg;
1527 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1529 if (vmx_offset == -1)
1532 reg.vval = VCPU_VSX_VR(vcpu, index);
1533 *val = reg.vsxval[vmx_offset];
1538 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1540 union kvmppc_one_reg reg;
1545 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1547 if (vmx_offset == -1)
1550 reg.vval = VCPU_VSX_VR(vcpu, index);
1551 *val = reg.vsx32val[vmx_offset];
1556 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1558 union kvmppc_one_reg reg;
1563 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1565 if (vmx_offset == -1)
1568 reg.vval = VCPU_VSX_VR(vcpu, index);
1569 *val = reg.vsx16val[vmx_offset];
1574 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1576 union kvmppc_one_reg reg;
1581 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1583 if (vmx_offset == -1)
1586 reg.vval = VCPU_VSX_VR(vcpu, index);
1587 *val = reg.vsx8val[vmx_offset];
1592 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1593 unsigned int rs, unsigned int bytes, int is_default_endian)
1596 unsigned int index = rs & KVM_MMIO_REG_MASK;
1597 enum emulation_result emulated = EMULATE_DONE;
	if (vcpu->arch.mmio_vmx_copy_nums > 2)
1600 return EMULATE_FAIL;
1602 vcpu->arch.io_gpr = rs;
1604 while (vcpu->arch.mmio_vmx_copy_nums) {
1605 switch (vcpu->arch.mmio_copy_type) {
1606 case KVMPPC_VMX_COPY_DWORD:
1607 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1608 return EMULATE_FAIL;
1611 case KVMPPC_VMX_COPY_WORD:
1612 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1613 return EMULATE_FAIL;
1615 case KVMPPC_VMX_COPY_HWORD:
1616 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1617 return EMULATE_FAIL;
1619 case KVMPPC_VMX_COPY_BYTE:
1620 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1621 return EMULATE_FAIL;
1624 return EMULATE_FAIL;
1627 emulated = kvmppc_handle_store(vcpu, val, bytes,
1629 if (emulated != EMULATE_DONE)
1632 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1633 vcpu->arch.mmio_vmx_copy_nums--;
1634 vcpu->arch.mmio_vmx_offset++;
1640 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1642 struct kvm_run *run = vcpu->run;
1643 enum emulation_result emulated = EMULATE_FAIL;
1646 vcpu->arch.paddr_accessed += run->mmio.len;
1648 if (!vcpu->mmio_is_write) {
1649 emulated = kvmppc_handle_vmx_load(vcpu,
1650 vcpu->arch.io_gpr, run->mmio.len, 1);
1652 emulated = kvmppc_handle_vmx_store(vcpu,
1653 vcpu->arch.io_gpr, run->mmio.len, 1);
1657 case EMULATE_DO_MMIO:
1658 run->exit_reason = KVM_EXIT_MMIO;
1662 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1663 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1664 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1673 #endif /* CONFIG_ALTIVEC */
1675 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1678 union kvmppc_one_reg val;
1681 size = one_reg_size(reg->id);
1682 if (size > sizeof(val))
1685 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1689 #ifdef CONFIG_ALTIVEC
1690 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1691 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1695 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1697 case KVM_REG_PPC_VSCR:
1698 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1702 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1704 case KVM_REG_PPC_VRSAVE:
1705 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1707 #endif /* CONFIG_ALTIVEC */
1717 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1723 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1726 union kvmppc_one_reg val;
1729 size = one_reg_size(reg->id);
1730 if (size > sizeof(val))
1733 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1736 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1740 #ifdef CONFIG_ALTIVEC
1741 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1742 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1746 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1748 case KVM_REG_PPC_VSCR:
1749 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1753 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1755 case KVM_REG_PPC_VRSAVE:
1756 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1760 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1762 #endif /* CONFIG_ALTIVEC */
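/*
 * KVM_RUN entry point.  Before re-entering the guest, finish whatever the
 * previous exit left pending: the result of an MMIO load, the next chunk
 * of a multi-element VSX/VMX access (re-running MMIO emulation while
 * elements remain), OSI and PAPR hypercall return values, or an EPR
 * update.  Then activate the guest's signal mask and call the core run
 * loop, unless userspace requested an immediate exit.
 */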
1772 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1774 struct kvm_run *run = vcpu->run;
1779 if (vcpu->mmio_needed) {
1780 vcpu->mmio_needed = 0;
1781 if (!vcpu->mmio_is_write)
1782 kvmppc_complete_mmio_load(vcpu);
1784 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1785 vcpu->arch.mmio_vsx_copy_nums--;
1786 vcpu->arch.mmio_vsx_offset++;
1789 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1790 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1791 if (r == RESUME_HOST) {
1792 vcpu->mmio_needed = 1;
1797 #ifdef CONFIG_ALTIVEC
1798 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1799 vcpu->arch.mmio_vmx_copy_nums--;
1800 vcpu->arch.mmio_vmx_offset++;
1803 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1804 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1805 if (r == RESUME_HOST) {
1806 vcpu->mmio_needed = 1;
1811 } else if (vcpu->arch.osi_needed) {
1812 u64 *gprs = run->osi.gprs;
1815 for (i = 0; i < 32; i++)
1816 kvmppc_set_gpr(vcpu, i, gprs[i]);
1817 vcpu->arch.osi_needed = 0;
1818 } else if (vcpu->arch.hcall_needed) {
1821 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1822 for (i = 0; i < 9; ++i)
1823 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1824 vcpu->arch.hcall_needed = 0;
1826 } else if (vcpu->arch.epr_needed) {
1827 kvmppc_set_epr(vcpu, run->epr.epr);
1828 vcpu->arch.epr_needed = 0;
1832 kvm_sigset_activate(vcpu);
1834 if (run->immediate_exit)
1837 r = kvmppc_vcpu_run(vcpu);
1839 kvm_sigset_deactivate(vcpu);
1841 #ifdef CONFIG_ALTIVEC
1848 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1850 if (irq->irq == KVM_INTERRUPT_UNSET) {
1851 kvmppc_core_dequeue_external(vcpu);
1855 kvmppc_core_queue_external(vcpu, irq);
1857 kvm_vcpu_kick(vcpu);
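/*
 * Per-vcpu KVM_ENABLE_CAP handler: switches on OSI, PAPR, EPR and
 * watchdog handling, configures the software TLB on e500, and connects
 * the vcpu to an in-kernel MPIC/XICS/XIVE device identified by the file
 * descriptor in args[0].  A final kvmppc_sanity_check() rejects
 * configurations the host cannot run.
 */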
1862 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1863 struct kvm_enable_cap *cap)
1871 case KVM_CAP_PPC_OSI:
1873 vcpu->arch.osi_enabled = true;
1875 case KVM_CAP_PPC_PAPR:
1877 vcpu->arch.papr_enabled = true;
1879 case KVM_CAP_PPC_EPR:
1882 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1884 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1887 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1889 vcpu->arch.watchdog_enabled = true;
1892 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1893 case KVM_CAP_SW_TLB: {
1894 struct kvm_config_tlb cfg;
1895 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1898 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1901 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1905 #ifdef CONFIG_KVM_MPIC
1906 case KVM_CAP_IRQ_MPIC: {
1908 struct kvm_device *dev;
1911 f = fdget(cap->args[0]);
1916 dev = kvm_device_from_filp(f.file);
1918 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1924 #ifdef CONFIG_KVM_XICS
1925 case KVM_CAP_IRQ_XICS: {
1927 struct kvm_device *dev;
1930 f = fdget(cap->args[0]);
1935 dev = kvm_device_from_filp(f.file);
1938 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1940 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1946 #endif /* CONFIG_KVM_XICS */
1947 #ifdef CONFIG_KVM_XIVE
1948 case KVM_CAP_PPC_IRQ_XIVE: {
1950 struct kvm_device *dev;
1953 f = fdget(cap->args[0]);
1958 if (!xive_enabled())
1962 dev = kvm_device_from_filp(f.file);
1964 r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
1970 #endif /* CONFIG_KVM_XIVE */
1971 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1972 case KVM_CAP_PPC_FWNMI:
1974 if (!is_kvmppc_hv_enabled(vcpu->kvm))
1977 vcpu->kvm->arch.fwnmi_enabled = true;
1979 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1986 r = kvmppc_sanity_check(vcpu);
1991 bool kvm_arch_intc_initialized(struct kvm *kvm)
1993 #ifdef CONFIG_KVM_MPIC
1997 #ifdef CONFIG_KVM_XICS
1998 if (kvm->arch.xics || kvm->arch.xive)
2004 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2005 struct kvm_mp_state *mp_state)
2010 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2011 struct kvm_mp_state *mp_state)
2016 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2017 unsigned int ioctl, unsigned long arg)
2019 struct kvm_vcpu *vcpu = filp->private_data;
2020 void __user *argp = (void __user *)arg;
2022 if (ioctl == KVM_INTERRUPT) {
2023 struct kvm_interrupt irq;
2024 if (copy_from_user(&irq, argp, sizeof(irq)))
2026 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2028 return -ENOIOCTLCMD;
2031 long kvm_arch_vcpu_ioctl(struct file *filp,
2032 unsigned int ioctl, unsigned long arg)
2034 struct kvm_vcpu *vcpu = filp->private_data;
2035 void __user *argp = (void __user *)arg;
2039 case KVM_ENABLE_CAP:
2041 struct kvm_enable_cap cap;
2043 if (copy_from_user(&cap, argp, sizeof(cap)))
2046 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2051 case KVM_SET_ONE_REG:
2052 case KVM_GET_ONE_REG:
2054 struct kvm_one_reg reg;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2065 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2066 case KVM_DIRTY_TLB: {
2067 struct kvm_dirty_tlb dirty;
2069 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2072 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2085 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2087 return VM_FAULT_SIGBUS;
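/*
 * Report the instruction sequence a guest should patch in to make a KVM
 * hypercall: a single "sc 1" on BookE-HV, otherwise lis/ori to load
 * KVM_SC_MAGIC_R0 into r0 followed by "sc", padded out with nops.  The
 * flags field advertises EV_IDLE support.
 */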
2090 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2092 u32 inst_nop = 0x60000000;
2093 #ifdef CONFIG_KVM_BOOKE_HV
2094 u32 inst_sc1 = 0x44000022;
2095 pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2096 pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2097 pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2098 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2100 u32 inst_lis = 0x3c000000;
2101 u32 inst_ori = 0x60000000;
2102 u32 inst_sc = 0x44000002;
2103 u32 inst_imm_mask = 0xffff;
	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
2114 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2115 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2116 pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2117 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2120 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2125 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2128 if (!irqchip_in_kernel(kvm))
2131 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2132 irq_event->irq, irq_event->level,
2138 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2139 struct kvm_enable_cap *cap)
2147 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2148 case KVM_CAP_PPC_ENABLE_HCALL: {
2149 unsigned long hcall = cap->args[0];
2152 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2155 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2158 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2160 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2164 case KVM_CAP_PPC_SMT: {
2165 unsigned long mode = cap->args[0];
2166 unsigned long flags = cap->args[1];
2169 if (kvm->arch.kvm_ops->set_smt_mode)
2170 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2174 case KVM_CAP_PPC_NESTED_HV:
2176 if (!is_kvmppc_hv_enabled(kvm) ||
2177 !kvm->arch.kvm_ops->enable_nested)
2179 r = kvm->arch.kvm_ops->enable_nested(kvm);
2182 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2183 case KVM_CAP_PPC_SECURE_GUEST:
2185 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2187 r = kvm->arch.kvm_ops->enable_svm(kvm);
2189 case KVM_CAP_PPC_DAWR1:
2191 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2193 r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2204 #ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
2213 #ifdef CONFIG_PPC_PSERIES
2214 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2216 struct h_cpu_char_result c;
2219 if (!machine_is(pseries))
2222 rc = plpar_get_cpu_characteristics(&c);
2223 if (rc == H_SUCCESS) {
2224 cp->character = c.character;
2225 cp->behaviour = c.behaviour;
2226 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2227 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2228 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2229 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2230 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2231 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2232 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2233 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2234 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2235 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2236 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2237 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2238 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2243 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2249 static inline bool have_fw_feat(struct device_node *fw_features,
2250 const char *state, const char *name)
2252 struct device_node *np;
2255 np = of_get_child_by_name(fw_features, name);
2257 r = of_property_read_bool(np, state);
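/*
 * Fill in the CPU characteristics reported by KVM_PPC_GET_CPU_CHAR: try
 * the pseries hypervisor call first and, failing that, read the ibm,opal
 * "fw-features" device-tree nodes on powernv, translating them into
 * KVM_PPC_CPU_CHAR_* and KVM_PPC_CPU_BEHAV_* bits plus the corresponding
 * masks of bits the host knows about.
 */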
2263 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2265 struct device_node *np, *fw_features;
2268 memset(cp, 0, sizeof(*cp));
2269 r = pseries_get_cpu_char(cp);
2273 np = of_find_node_by_name(NULL, "ibm,opal");
2275 fw_features = of_get_child_by_name(np, "fw-features");
2279 if (have_fw_feat(fw_features, "enabled",
2280 "inst-spec-barrier-ori31,31,0"))
2281 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2282 if (have_fw_feat(fw_features, "enabled",
2283 "fw-bcctrl-serialized"))
2284 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2285 if (have_fw_feat(fw_features, "enabled",
2286 "inst-l1d-flush-ori30,30,0"))
2287 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2288 if (have_fw_feat(fw_features, "enabled",
2289 "inst-l1d-flush-trig2"))
2290 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2291 if (have_fw_feat(fw_features, "enabled",
2292 "fw-l1d-thread-split"))
2293 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2294 if (have_fw_feat(fw_features, "enabled",
2295 "fw-count-cache-disabled"))
2296 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2297 if (have_fw_feat(fw_features, "enabled",
2298 "fw-count-cache-flush-bcctr2,0,0"))
2299 cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2300 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2301 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2302 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2303 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2304 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2305 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2306 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2308 if (have_fw_feat(fw_features, "enabled",
2309 "speculation-policy-favor-security"))
2310 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2311 if (!have_fw_feat(fw_features, "disabled",
2312 "needs-l1d-flush-msr-pr-0-to-1"))
2313 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2314 if (!have_fw_feat(fw_features, "disabled",
2315 "needs-spec-barrier-for-bound-checks"))
2316 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2317 if (have_fw_feat(fw_features, "enabled",
2318 "needs-count-cache-flush-on-context-switch"))
2319 cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2320 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2321 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2322 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2323 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2325 of_node_put(fw_features);
2332 long kvm_arch_vm_ioctl(struct file *filp,
2333 unsigned int ioctl, unsigned long arg)
2335 struct kvm *kvm __maybe_unused = filp->private_data;
2336 void __user *argp = (void __user *)arg;
2340 case KVM_PPC_GET_PVINFO: {
2341 struct kvm_ppc_pvinfo pvinfo;
2342 memset(&pvinfo, 0, sizeof(pvinfo));
2343 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2344 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2351 #ifdef CONFIG_SPAPR_TCE_IOMMU
2352 case KVM_CREATE_SPAPR_TCE_64: {
2353 struct kvm_create_spapr_tce_64 create_tce_64;
2356 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2358 if (create_tce_64.flags) {
2362 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2365 case KVM_CREATE_SPAPR_TCE: {
2366 struct kvm_create_spapr_tce create_tce;
2367 struct kvm_create_spapr_tce_64 create_tce_64;
2370 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2373 create_tce_64.liobn = create_tce.liobn;
2374 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2375 create_tce_64.offset = 0;
2376 create_tce_64.size = create_tce.window_size >>
2377 IOMMU_PAGE_SHIFT_4K;
2378 create_tce_64.flags = 0;
2379 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2383 #ifdef CONFIG_PPC_BOOK3S_64
2384 case KVM_PPC_GET_SMMU_INFO: {
2385 struct kvm_ppc_smmu_info info;
2386 struct kvm *kvm = filp->private_data;
2388 memset(&info, 0, sizeof(info));
2389 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2390 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2394 case KVM_PPC_RTAS_DEFINE_TOKEN: {
2395 struct kvm *kvm = filp->private_data;
2397 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2400 case KVM_PPC_CONFIGURE_V3_MMU: {
2401 struct kvm *kvm = filp->private_data;
2402 struct kvm_ppc_mmuv3_cfg cfg;
2405 if (!kvm->arch.kvm_ops->configure_mmu)
2408 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2410 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2413 case KVM_PPC_GET_RMMU_INFO: {
2414 struct kvm *kvm = filp->private_data;
2415 struct kvm_ppc_rmmu_info info;
2418 if (!kvm->arch.kvm_ops->get_rmmu_info)
2420 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2421 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2425 case KVM_PPC_GET_CPU_CHAR: {
2426 struct kvm_ppc_cpu_char cpuchar;
2428 r = kvmppc_get_cpu_char(&cpuchar);
2429 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2433 case KVM_PPC_SVM_OFF: {
2434 struct kvm *kvm = filp->private_data;
2437 if (!kvm->arch.kvm_ops->svm_off)
2440 r = kvm->arch.kvm_ops->svm_off(kvm);
2444 struct kvm *kvm = filp->private_data;
2445 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2447 #else /* CONFIG_PPC_BOOK3S_64 */
2456 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
2457 static unsigned long nr_lpids;
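/*
 * Simple bitmap allocator for host logical partition IDs (LPIDs).
 * kvmppc_init_lpid() records how many LPIDs the platform provides,
 * kvmppc_alloc_lpid() hands out the first free one, kvmppc_claim_lpid()
 * reserves a specific value and kvmppc_free_lpid() returns an LPID to
 * the pool.
 */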
2459 long kvmppc_alloc_lpid(void)
2464 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
2465 if (lpid >= nr_lpids) {
2466 pr_err("%s: No LPIDs free\n", __func__);
2469 } while (test_and_set_bit(lpid, lpid_inuse));
2473 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2475 void kvmppc_claim_lpid(long lpid)
2477 set_bit(lpid, lpid_inuse);
2479 EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
2481 void kvmppc_free_lpid(long lpid)
2483 clear_bit(lpid, lpid_inuse);
2485 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2487 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2489 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
2490 memset(lpid_inuse, 0, sizeof(lpid_inuse));
2492 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2494 int kvm_arch_init(void *opaque)
2499 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);