/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/kvm_host.h>
24 #include <linux/vmalloc.h>
25 #include <linux/hrtimer.h>
26 #include <linux/sched/signal.h>
28 #include <linux/slab.h>
29 #include <linux/file.h>
30 #include <linux/module.h>
31 #include <linux/irqbypass.h>
32 #include <linux/kvm_irqfd.h>
33 #include <asm/cputable.h>
34 #include <linux/uaccess.h>
35 #include <asm/kvm_ppc.h>
36 #include <asm/cputhreads.h>
37 #include <asm/irqflags.h>
38 #include <asm/iommu.h>
39 #include <asm/switch_to.h>
41 #ifdef CONFIG_PPC_PSERIES
42 #include <asm/hvcall.h>
43 #include <asm/plpar_wrappers.h>
48 #include "../mm/mmu_decl.h"
50 #define CREATE_TRACE_POINTS
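/*
 * The HV and PR implementations each supply a kvmppc_ops structure; the
 * pointers below are filled in when the respective backend initialises.
 * kvm_arch_init_vm() picks one of them (honouring an explicit
 * KVM_VM_PPC_HV or KVM_VM_PPC_PR request) and records it in
 * kvm->arch.kvm_ops for the lifetime of the VM.
 */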
53 struct kvmppc_ops *kvmppc_hv_ops;
54 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
55 struct kvmppc_ops *kvmppc_pr_ops;
56 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
59 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
61 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
64 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
69 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; they are disabled again before the actual guest entry.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
83 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
87 WARN_ON(irqs_disabled());
98 if (signal_pending(current)) {
99 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
100 vcpu->run->exit_reason = KVM_EXIT_INTR;
105 vcpu->mode = IN_GUEST_MODE;
/*
 * Reading vcpu->requests must happen after setting vcpu->mode,
 * so we don't miss a request because the requester sees
 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
 * before next entering the guest (and thus doesn't IPI).
 * This also orders the write to mode from any reads
 * to the page tables done while the VCPU is running.
 * Please see the comment in kvm_flush_remote_tlbs.
 */
118 if (kvm_request_pending(vcpu)) {
/* Make sure we process requests in a preemptible context */
121 trace_kvm_check_requests(vcpu);
122 r = kvmppc_core_check_requests(vcpu);
129 if (kvmppc_core_prepare_to_enter(vcpu)) {
130 /* interrupts got enabled in between, so we
131 are back at square 1 */
135 guest_enter_irqoff();
143 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
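/*
 * The shared (magic) page is accessed by the guest with ordinary loads and
 * stores, so its fields have to be kept in the guest's current byte order.
 * kvmppc_swab_shared() flips every field; the KVM_HC_PPC_MAP_MAGIC_PAGE
 * handler below calls it whenever the guest's endianness no longer matches
 * the recorded vcpu->arch.shared_big_endian.
 */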
145 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
146 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
148 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
151 shared->sprg0 = swab64(shared->sprg0);
152 shared->sprg1 = swab64(shared->sprg1);
153 shared->sprg2 = swab64(shared->sprg2);
154 shared->sprg3 = swab64(shared->sprg3);
155 shared->srr0 = swab64(shared->srr0);
156 shared->srr1 = swab64(shared->srr1);
157 shared->dar = swab64(shared->dar);
158 shared->msr = swab64(shared->msr);
159 shared->dsisr = swab32(shared->dsisr);
160 shared->int_pending = swab32(shared->int_pending);
161 for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
162 shared->sr[i] = swab32(shared->sr[i]);
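/*
 * kvmppc_kvm_pv() handles KVM's paravirtual hypercalls: the hypercall
 * number arrives in r11 and up to four parameters in r3-r6, with a second
 * return value passed back to the guest in r4.  For 32-bit guests (MSR_SF
 * clear) the parameters are truncated to 32 bits before use.
 */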
166 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
168 int nr = kvmppc_get_gpr(vcpu, 11);
170 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
171 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
172 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
173 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
174 unsigned long r2 = 0;
176 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
178 param1 &= 0xffffffff;
179 param2 &= 0xffffffff;
180 param3 &= 0xffffffff;
181 param4 &= 0xffffffff;
185 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
187 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
188 /* Book3S can be little endian, find it out here */
189 int shared_big_endian = true;
190 if (vcpu->arch.intr_msr & MSR_LE)
191 shared_big_endian = false;
192 if (shared_big_endian != vcpu->arch.shared_big_endian)
193 kvmppc_swab_shared(vcpu);
194 vcpu->arch.shared_big_endian = shared_big_endian;
197 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
199 * Older versions of the Linux magic page code had
200 * a bug where they would map their trampoline code
201 * NX. If that's the case, remove !PR NX capability.
203 vcpu->arch.disable_kernel_nx = true;
204 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
207 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
208 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
210 #ifdef CONFIG_PPC_64K_PAGES
212 * Make sure our 4k magic page is in the same window of a 64k
213 * page within the guest and within the host's page.
215 if ((vcpu->arch.magic_page_pa & 0xf000) !=
216 ((ulong)vcpu->arch.shared & 0xf000)) {
217 void *old_shared = vcpu->arch.shared;
218 ulong shared = (ulong)vcpu->arch.shared;
222 shared |= vcpu->arch.magic_page_pa & 0xf000;
223 new_shared = (void*)shared;
224 memcpy(new_shared, old_shared, 0x1000);
225 vcpu->arch.shared = new_shared;
229 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
234 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
236 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
237 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
240 /* Second return value is in r4 */
242 case EV_HCALL_TOKEN(EV_IDLE):
244 kvm_vcpu_block(vcpu);
245 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
248 r = EV_UNIMPLEMENTED;
252 kvmppc_set_gpr(vcpu, 4, r2);
256 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
258 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
262 /* We have to know what CPU to virtualize */
266 /* PAPR only works with book3s_64 */
267 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
270 /* HV KVM can only do PAPR mode for now */
271 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
274 #ifdef CONFIG_KVM_BOOKE_HV
275 if (!cpu_has_feature(CPU_FTR_EMB_HV))
283 return r ? 0 : -EINVAL;
285 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
287 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
289 enum emulation_result er;
292 er = kvmppc_emulate_loadstore(vcpu);
295 /* Future optimization: only reload non-volatiles if they were
296 * actually modified. */
302 case EMULATE_DO_MMIO:
303 run->exit_reason = KVM_EXIT_MMIO;
304 /* We must reload nonvolatiles because "update" load/store
305 * instructions modify register state. */
306 /* Future optimization: only reload non-volatiles if they were
307 * actually modified. */
314 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
315 /* XXX Deliver Program interrupt to guest. */
316 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
327 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
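/*
 * kvmppc_st() and kvmppc_ld() copy data to or from a guest effective
 * address: the address is translated with kvmppc_xlate() (or handed to a
 * backend store_to_eaddr/load_from_eaddr hook when one is provided), the
 * magic page is serviced directly from vcpu->arch.shared, and everything
 * else goes through kvm_write_guest()/kvm_read_guest().  EMULATE_DO_MMIO
 * is returned when the access cannot be completed here.
 */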
329 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
332 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
333 struct kvmppc_pte pte;
338 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
339 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
342 if ((!r) || (r == -EAGAIN))
345 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
355 /* Magic page override */
356 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
357 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
358 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
359 void *magic = vcpu->arch.shared;
360 magic += pte.eaddr & 0xfff;
361 memcpy(magic, ptr, size);
365 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
366 return EMULATE_DO_MMIO;
370 EXPORT_SYMBOL_GPL(kvmppc_st);
372 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
375 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
376 struct kvmppc_pte pte;
381 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
382 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
385 if ((!rc) || (rc == -EAGAIN))
388 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
398 if (!data && !pte.may_execute)
401 /* Magic page override */
402 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
403 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
404 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
405 void *magic = vcpu->arch.shared;
406 magic += pte.eaddr & 0xfff;
407 memcpy(ptr, magic, size);
411 if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
412 return EMULATE_DO_MMIO;
416 EXPORT_SYMBOL_GPL(kvmppc_ld);
418 int kvm_arch_hardware_enable(void)
423 int kvm_arch_hardware_setup(void)
428 void kvm_arch_check_processor_compat(void *rtn)
430 *(int *)rtn = kvmppc_core_check_processor_compat();
433 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
435 struct kvmppc_ops *kvm_ops = NULL;
437 * if we have both HV and PR enabled, default is HV
441 kvm_ops = kvmppc_hv_ops;
443 kvm_ops = kvmppc_pr_ops;
446 } else if (type == KVM_VM_PPC_HV) {
449 kvm_ops = kvmppc_hv_ops;
450 } else if (type == KVM_VM_PPC_PR) {
453 kvm_ops = kvmppc_pr_ops;
457 if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
460 kvm->arch.kvm_ops = kvm_ops;
461 return kvmppc_core_init_vm(kvm);
466 bool kvm_arch_has_vcpu_debugfs(void)
471 int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
476 void kvm_arch_destroy_vm(struct kvm *kvm)
479 struct kvm_vcpu *vcpu;
481 #ifdef CONFIG_KVM_XICS
483 * We call kick_all_cpus_sync() to ensure that all
484 * CPUs have executed any pending IPIs before we
* continue and free VCPU structures below.
487 if (is_kvmppc_hv_enabled(kvm))
488 kick_all_cpus_sync();
491 kvm_for_each_vcpu(i, vcpu, kvm)
492 kvm_arch_vcpu_free(vcpu);
494 mutex_lock(&kvm->lock);
495 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
496 kvm->vcpus[i] = NULL;
498 atomic_set(&kvm->online_vcpus, 0);
500 kvmppc_core_destroy_vm(kvm);
502 mutex_unlock(&kvm->lock);
504 /* drop the module reference */
505 module_put(kvm->arch.kvm_ops->owner);
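/*
 * The capabilities reported below are queried by userspace with the
 * generic KVM_CHECK_EXTENSION ioctl.  A minimal sketch (hypothetical
 * descriptor name vm_fd):
 *
 *	int smt = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *
 * Zero means the capability is absent; positive values carry
 * capability-specific information, such as the SMT mode above.
 */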
508 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
511 /* Assume we're using HV mode when the HV module is loaded */
512 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
516 * Hooray - we know which VM type we're running on. Depend on
517 * that rather than the guess above.
519 hv_enabled = is_kvmppc_hv_enabled(kvm);
524 case KVM_CAP_PPC_BOOKE_SREGS:
525 case KVM_CAP_PPC_BOOKE_WATCHDOG:
526 case KVM_CAP_PPC_EPR:
528 case KVM_CAP_PPC_SEGSTATE:
529 case KVM_CAP_PPC_HIOR:
530 case KVM_CAP_PPC_PAPR:
532 case KVM_CAP_PPC_UNSET_IRQ:
533 case KVM_CAP_PPC_IRQ_LEVEL:
534 case KVM_CAP_ENABLE_CAP:
535 case KVM_CAP_ONE_REG:
536 case KVM_CAP_IOEVENTFD:
537 case KVM_CAP_DEVICE_CTRL:
538 case KVM_CAP_IMMEDIATE_EXIT:
541 case KVM_CAP_PPC_PAIRED_SINGLES:
542 case KVM_CAP_PPC_OSI:
543 case KVM_CAP_PPC_GET_PVINFO:
544 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
547 /* We support this only for PR */
550 #ifdef CONFIG_KVM_MPIC
551 case KVM_CAP_IRQ_MPIC:
556 #ifdef CONFIG_PPC_BOOK3S_64
557 case KVM_CAP_SPAPR_TCE:
558 case KVM_CAP_SPAPR_TCE_64:
561 case KVM_CAP_SPAPR_TCE_VFIO:
562 r = !!cpu_has_feature(CPU_FTR_HVMODE);
564 case KVM_CAP_PPC_RTAS:
565 case KVM_CAP_PPC_FIXUP_HCALL:
566 case KVM_CAP_PPC_ENABLE_HCALL:
567 #ifdef CONFIG_KVM_XICS
568 case KVM_CAP_IRQ_XICS:
570 case KVM_CAP_PPC_GET_CPU_CHAR:
573 #ifdef CONFIG_KVM_XIVE
574 case KVM_CAP_PPC_IRQ_XIVE:
576 * We need XIVE to be enabled on the platform (implies
577 * a POWER9 processor) and the PowerNV platform, as
578 * nested is not yet supported.
580 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE);
584 case KVM_CAP_PPC_ALLOC_HTAB:
587 #endif /* CONFIG_PPC_BOOK3S_64 */
588 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
589 case KVM_CAP_PPC_SMT:
592 if (kvm->arch.emul_smt_mode > 1)
593 r = kvm->arch.emul_smt_mode;
595 r = kvm->arch.smt_mode;
596 } else if (hv_enabled) {
597 if (cpu_has_feature(CPU_FTR_ARCH_300))
600 r = threads_per_subcore;
603 case KVM_CAP_PPC_SMT_POSSIBLE:
606 if (!cpu_has_feature(CPU_FTR_ARCH_300))
607 r = ((threads_per_subcore << 1) - 1);
609 /* P9 can emulate dbells, so allow any mode */
613 case KVM_CAP_PPC_RMA:
616 case KVM_CAP_PPC_HWRNG:
617 r = kvmppc_hwrng_present();
619 case KVM_CAP_PPC_MMU_RADIX:
620 r = !!(hv_enabled && radix_enabled());
622 case KVM_CAP_PPC_MMU_HASH_V3:
623 r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
624 cpu_has_feature(CPU_FTR_HVMODE));
626 case KVM_CAP_PPC_NESTED_HV:
627 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
628 !kvmppc_hv_ops->enable_nested(NULL));
631 case KVM_CAP_SYNC_MMU:
632 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
634 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
640 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
641 case KVM_CAP_PPC_HTAB_FD:
645 case KVM_CAP_NR_VCPUS:
647 * Recommending a number of CPUs is somewhat arbitrary; we
648 * return the number of present CPUs for -HV (since a host
649 * will have secondary threads "offline"), and for other KVM
650 * implementations just count online CPUs.
653 r = num_present_cpus();
655 r = num_online_cpus();
657 case KVM_CAP_MAX_VCPUS:
660 #ifdef CONFIG_PPC_BOOK3S_64
661 case KVM_CAP_PPC_GET_SMMU_INFO:
664 case KVM_CAP_SPAPR_MULTITCE:
667 case KVM_CAP_SPAPR_RESIZE_HPT:
671 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
672 case KVM_CAP_PPC_FWNMI:
676 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
677 case KVM_CAP_PPC_HTM:
678 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
679 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
690 long kvm_arch_dev_ioctl(struct file *filp,
691 unsigned int ioctl, unsigned long arg)
696 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
697 struct kvm_memory_slot *dont)
699 kvmppc_core_free_memslot(kvm, free, dont);
702 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
703 unsigned long npages)
705 return kvmppc_core_create_memslot(kvm, slot, npages);
708 int kvm_arch_prepare_memory_region(struct kvm *kvm,
709 struct kvm_memory_slot *memslot,
710 const struct kvm_userspace_memory_region *mem,
711 enum kvm_mr_change change)
713 return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
716 void kvm_arch_commit_memory_region(struct kvm *kvm,
717 const struct kvm_userspace_memory_region *mem,
718 const struct kvm_memory_slot *old,
719 const struct kvm_memory_slot *new,
720 enum kvm_mr_change change)
722 kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
725 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
726 struct kvm_memory_slot *slot)
728 kvmppc_core_flush_memslot(kvm, slot);
731 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
733 struct kvm_vcpu *vcpu;
734 vcpu = kvmppc_core_vcpu_create(kvm, id);
736 vcpu->arch.wqp = &vcpu->wq;
737 kvmppc_create_vcpu_debugfs(vcpu, id);
742 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
746 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
748 /* Make sure we're not using the vcpu anymore */
749 hrtimer_cancel(&vcpu->arch.dec_timer);
751 kvmppc_remove_vcpu_debugfs(vcpu);
753 switch (vcpu->arch.irq_type) {
754 case KVMPPC_IRQ_MPIC:
755 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
757 case KVMPPC_IRQ_XICS:
759 kvmppc_xive_cleanup_vcpu(vcpu);
761 kvmppc_xics_free_icp(vcpu);
763 case KVMPPC_IRQ_XIVE:
764 kvmppc_xive_native_cleanup_vcpu(vcpu);
768 kvmppc_core_vcpu_free(vcpu);
771 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
773 kvm_arch_vcpu_free(vcpu);
776 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
778 return kvmppc_core_pending_dec(vcpu);
781 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
783 struct kvm_vcpu *vcpu;
785 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
786 kvmppc_decrementer_func(vcpu);
788 return HRTIMER_NORESTART;
791 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
795 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
796 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
797 vcpu->arch.dec_expires = get_tb();
799 #ifdef CONFIG_KVM_EXIT_TIMING
800 mutex_init(&vcpu->arch.exit_timing_lock);
802 ret = kvmppc_subarch_vcpu_init(vcpu);
806 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
808 kvmppc_mmu_destroy(vcpu);
809 kvmppc_subarch_vcpu_uninit(vcpu);
812 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
816 * vrsave (formerly usprg0) isn't used by Linux, but may
817 * be used by the guest.
819 * On non-booke this is associated with Altivec and
820 * is handled by code in book3s.c.
822 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
824 kvmppc_core_vcpu_load(vcpu, cpu);
827 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
829 kvmppc_core_vcpu_put(vcpu);
831 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
836 * irq_bypass_add_producer and irq_bypass_del_producer are only
837 * useful if the architecture supports PCI passthrough.
838 * irq_bypass_stop and irq_bypass_start are not needed and so
839 * kvm_ops are not defined for them.
841 bool kvm_arch_has_irq_bypass(void)
843 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
844 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
847 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
848 struct irq_bypass_producer *prod)
850 struct kvm_kernel_irqfd *irqfd =
851 container_of(cons, struct kvm_kernel_irqfd, consumer);
852 struct kvm *kvm = irqfd->kvm;
854 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
855 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
860 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
861 struct irq_bypass_producer *prod)
863 struct kvm_kernel_irqfd *irqfd =
864 container_of(cons, struct kvm_kernel_irqfd, consumer);
865 struct kvm *kvm = irqfd->kvm;
867 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
868 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
872 static inline int kvmppc_get_vsr_dword_offset(int index)
876 if ((index != 0) && (index != 1))
888 static inline int kvmppc_get_vsr_word_offset(int index)
892 if ((index > 3) || (index < 0))
903 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
906 union kvmppc_one_reg val;
907 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
908 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
914 val.vval = VCPU_VSX_VR(vcpu, index - 32);
915 val.vsxval[offset] = gpr;
916 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
918 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
922 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
925 union kvmppc_one_reg val;
926 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
929 val.vval = VCPU_VSX_VR(vcpu, index - 32);
932 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
934 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
935 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
939 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
942 union kvmppc_one_reg val;
943 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
946 val.vsx32val[0] = gpr;
947 val.vsx32val[1] = gpr;
948 val.vsx32val[2] = gpr;
949 val.vsx32val[3] = gpr;
950 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
952 val.vsx32val[0] = gpr;
953 val.vsx32val[1] = gpr;
954 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
955 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
959 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
962 union kvmppc_one_reg val;
963 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
964 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
965 int dword_offset, word_offset;
971 val.vval = VCPU_VSX_VR(vcpu, index - 32);
972 val.vsx32val[offset] = gpr32;
973 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
975 dword_offset = offset / 2;
976 word_offset = offset % 2;
977 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
978 val.vsx32val[word_offset] = gpr32;
979 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
982 #endif /* CONFIG_VSX */
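/*
 * The VMX helpers below mirror the VSX ones above:
 * kvmppc_get_vmx_offset_generic() converts an element index into the
 * position of that element within a vector register, reversing the index
 * when the guest's byte order differs from the host's
 * (kvmppc_need_byteswap()), and returns -1 for an out-of-range index.
 */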
984 #ifdef CONFIG_ALTIVEC
985 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
986 int index, int element_size)
989 int elts = sizeof(vector128)/element_size;
991 if ((index < 0) || (index >= elts))
994 if (kvmppc_need_byteswap(vcpu))
995 offset = elts - index - 1;
1002 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1005 return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1008 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1011 return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1014 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1017 return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1020 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1023 return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1027 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1030 union kvmppc_one_reg val;
1031 int offset = kvmppc_get_vmx_dword_offset(vcpu,
1032 vcpu->arch.mmio_vmx_offset);
1033 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1038 val.vval = VCPU_VSX_VR(vcpu, index);
1039 val.vsxval[offset] = gpr;
1040 VCPU_VSX_VR(vcpu, index) = val.vval;
1043 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1046 union kvmppc_one_reg val;
1047 int offset = kvmppc_get_vmx_word_offset(vcpu,
1048 vcpu->arch.mmio_vmx_offset);
1049 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1054 val.vval = VCPU_VSX_VR(vcpu, index);
1055 val.vsx32val[offset] = gpr32;
1056 VCPU_VSX_VR(vcpu, index) = val.vval;
1059 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1062 union kvmppc_one_reg val;
1063 int offset = kvmppc_get_vmx_hword_offset(vcpu,
1064 vcpu->arch.mmio_vmx_offset);
1065 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1070 val.vval = VCPU_VSX_VR(vcpu, index);
1071 val.vsx16val[offset] = gpr16;
1072 VCPU_VSX_VR(vcpu, index) = val.vval;
1075 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1078 union kvmppc_one_reg val;
1079 int offset = kvmppc_get_vmx_byte_offset(vcpu,
1080 vcpu->arch.mmio_vmx_offset);
1081 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1086 val.vval = VCPU_VSX_VR(vcpu, index);
1087 val.vsx8val[offset] = gpr8;
1088 VCPU_VSX_VR(vcpu, index) = val.vval;
1090 #endif /* CONFIG_ALTIVEC */
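/*
 * sp_to_dp() and dp_to_sp() convert between single- and double-precision
 * floating point by bouncing the value through an FPU register: lfs loads
 * a single and stfd stores it back as a double, and lfd/stfs do the
 * reverse.  They back the mmio_sp64_extend handling in the MMIO load and
 * store paths below.
 */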
1092 #ifdef CONFIG_PPC_FPU
1093 static inline u64 sp_to_dp(u32 fprs)
1099 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
1105 static inline u32 dp_to_sp(u64 fprd)
1111 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
1118 #define sp_to_dp(x) (x)
1119 #define dp_to_sp(x) (x)
1120 #endif /* CONFIG_PPC_FPU */
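/*
 * kvmppc_complete_mmio_load() finishes an MMIO read once the data is
 * available in run->mmio.data: the bytes are swabbed if the host access
 * was byte-swapped, optionally widened from single precision or sign
 * extended, and then written to whichever GPR/FPR/VSX/VMX destination was
 * recorded in vcpu->arch.io_gpr when the load was started.
 */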
1122 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
1123 struct kvm_run *run)
1125 u64 uninitialized_var(gpr);
1127 if (run->mmio.len > sizeof(gpr)) {
1128 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
1132 if (!vcpu->arch.mmio_host_swabbed) {
1133 switch (run->mmio.len) {
1134 case 8: gpr = *(u64 *)run->mmio.data; break;
1135 case 4: gpr = *(u32 *)run->mmio.data; break;
1136 case 2: gpr = *(u16 *)run->mmio.data; break;
1137 case 1: gpr = *(u8 *)run->mmio.data; break;
1140 switch (run->mmio.len) {
1141 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1142 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1143 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1144 case 1: gpr = *(u8 *)run->mmio.data; break;
1148 /* conversion between single and double precision */
1149 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1150 gpr = sp_to_dp(gpr);
1152 if (vcpu->arch.mmio_sign_extend) {
1153 switch (run->mmio.len) {
1156 gpr = (s64)(s32)gpr;
1160 gpr = (s64)(s16)gpr;
1168 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1169 case KVM_MMIO_REG_GPR:
1170 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1172 case KVM_MMIO_REG_FPR:
1173 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1174 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1176 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1178 #ifdef CONFIG_PPC_BOOK3S
1179 case KVM_MMIO_REG_QPR:
1180 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1182 case KVM_MMIO_REG_FQPR:
1183 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1184 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1188 case KVM_MMIO_REG_VSX:
1189 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1190 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1192 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1193 kvmppc_set_vsr_dword(vcpu, gpr);
1194 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1195 kvmppc_set_vsr_word(vcpu, gpr);
1196 else if (vcpu->arch.mmio_copy_type ==
1197 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1198 kvmppc_set_vsr_dword_dump(vcpu, gpr);
1199 else if (vcpu->arch.mmio_copy_type ==
1200 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1201 kvmppc_set_vsr_word_dump(vcpu, gpr);
1204 #ifdef CONFIG_ALTIVEC
1205 case KVM_MMIO_REG_VMX:
1206 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1207 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1209 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1210 kvmppc_set_vmx_dword(vcpu, gpr);
1211 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1212 kvmppc_set_vmx_word(vcpu, gpr);
1213 else if (vcpu->arch.mmio_copy_type ==
1214 KVMPPC_VMX_COPY_HWORD)
1215 kvmppc_set_vmx_hword(vcpu, gpr);
1216 else if (vcpu->arch.mmio_copy_type ==
1217 KVMPPC_VMX_COPY_BYTE)
1218 kvmppc_set_vmx_byte(vcpu, gpr);
1221 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1222 case KVM_MMIO_REG_NESTED_GPR:
1223 if (kvmppc_need_byteswap(vcpu))
1225 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1234 static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1235 unsigned int rt, unsigned int bytes,
1236 int is_default_endian, int sign_extend)
1241 /* Pity C doesn't have a logical XOR operator */
1242 if (kvmppc_need_byteswap(vcpu)) {
1243 host_swabbed = is_default_endian;
1245 host_swabbed = !is_default_endian;
1248 if (bytes > sizeof(run->mmio.data)) {
1249 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1253 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1254 run->mmio.len = bytes;
1255 run->mmio.is_write = 0;
1257 vcpu->arch.io_gpr = rt;
1258 vcpu->arch.mmio_host_swabbed = host_swabbed;
1259 vcpu->mmio_needed = 1;
1260 vcpu->mmio_is_write = 0;
1261 vcpu->arch.mmio_sign_extend = sign_extend;
1263 idx = srcu_read_lock(&vcpu->kvm->srcu);
1265 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1266 bytes, &run->mmio.data);
1268 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1271 kvmppc_complete_mmio_load(vcpu, run);
1272 vcpu->mmio_needed = 0;
1273 return EMULATE_DONE;
1276 return EMULATE_DO_MMIO;
1279 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1280 unsigned int rt, unsigned int bytes,
1281 int is_default_endian)
1283 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
1285 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1287 /* Same as above, but sign extends */
1288 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
1289 unsigned int rt, unsigned int bytes,
1290 int is_default_endian)
1292 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
1296 int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1297 unsigned int rt, unsigned int bytes,
1298 int is_default_endian, int mmio_sign_extend)
1300 enum emulation_result emulated = EMULATE_DONE;
/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1303 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1304 return EMULATE_FAIL;
1306 while (vcpu->arch.mmio_vsx_copy_nums) {
1307 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1308 is_default_endian, mmio_sign_extend);
1310 if (emulated != EMULATE_DONE)
1313 vcpu->arch.paddr_accessed += run->mmio.len;
1315 vcpu->arch.mmio_vsx_copy_nums--;
1316 vcpu->arch.mmio_vsx_offset++;
1320 #endif /* CONFIG_VSX */
1322 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1323 u64 val, unsigned int bytes, int is_default_endian)
1325 void *data = run->mmio.data;
1329 /* Pity C doesn't have a logical XOR operator */
1330 if (kvmppc_need_byteswap(vcpu)) {
1331 host_swabbed = is_default_endian;
1333 host_swabbed = !is_default_endian;
1336 if (bytes > sizeof(run->mmio.data)) {
1337 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1341 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1342 run->mmio.len = bytes;
1343 run->mmio.is_write = 1;
1344 vcpu->mmio_needed = 1;
1345 vcpu->mmio_is_write = 1;
1347 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1348 val = dp_to_sp(val);
/* Store the value in the lowest bytes of 'data'. */
1351 if (!host_swabbed) {
1353 case 8: *(u64 *)data = val; break;
1354 case 4: *(u32 *)data = val; break;
1355 case 2: *(u16 *)data = val; break;
1356 case 1: *(u8 *)data = val; break;
1360 case 8: *(u64 *)data = swab64(val); break;
1361 case 4: *(u32 *)data = swab32(val); break;
1362 case 2: *(u16 *)data = swab16(val); break;
1363 case 1: *(u8 *)data = val; break;
1367 idx = srcu_read_lock(&vcpu->kvm->srcu);
1369 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1370 bytes, &run->mmio.data);
1372 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1375 vcpu->mmio_needed = 0;
1376 return EMULATE_DONE;
1379 return EMULATE_DO_MMIO;
1381 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1384 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1386 u32 dword_offset, word_offset;
1387 union kvmppc_one_reg reg;
1389 int copy_type = vcpu->arch.mmio_copy_type;
1392 switch (copy_type) {
1393 case KVMPPC_VSX_COPY_DWORD:
1395 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1397 if (vsx_offset == -1) {
1403 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1405 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1406 *val = reg.vsxval[vsx_offset];
1410 case KVMPPC_VSX_COPY_WORD:
1412 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1414 if (vsx_offset == -1) {
1420 dword_offset = vsx_offset / 2;
1421 word_offset = vsx_offset % 2;
1422 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1423 *val = reg.vsx32val[word_offset];
1425 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1426 *val = reg.vsx32val[vsx_offset];
1438 int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1439 int rs, unsigned int bytes, int is_default_endian)
1442 enum emulation_result emulated = EMULATE_DONE;
1444 vcpu->arch.io_gpr = rs;
/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1447 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1448 return EMULATE_FAIL;
1450 while (vcpu->arch.mmio_vsx_copy_nums) {
1451 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1452 return EMULATE_FAIL;
1454 emulated = kvmppc_handle_store(run, vcpu,
1455 val, bytes, is_default_endian);
1457 if (emulated != EMULATE_DONE)
1460 vcpu->arch.paddr_accessed += run->mmio.len;
1462 vcpu->arch.mmio_vsx_copy_nums--;
1463 vcpu->arch.mmio_vsx_offset++;
1469 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
1470 struct kvm_run *run)
1472 enum emulation_result emulated = EMULATE_FAIL;
1475 vcpu->arch.paddr_accessed += run->mmio.len;
1477 if (!vcpu->mmio_is_write) {
1478 emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
1479 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1481 emulated = kvmppc_handle_vsx_store(run, vcpu,
1482 vcpu->arch.io_gpr, run->mmio.len, 1);
1486 case EMULATE_DO_MMIO:
1487 run->exit_reason = KVM_EXIT_MMIO;
1491 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1492 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1493 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1502 #endif /* CONFIG_VSX */
1504 #ifdef CONFIG_ALTIVEC
1505 int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1506 unsigned int rt, unsigned int bytes, int is_default_endian)
1508 enum emulation_result emulated = EMULATE_DONE;
if (vcpu->arch.mmio_vmx_copy_nums > 2)
1511 return EMULATE_FAIL;
1513 while (vcpu->arch.mmio_vmx_copy_nums) {
1514 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1515 is_default_endian, 0);
1517 if (emulated != EMULATE_DONE)
1520 vcpu->arch.paddr_accessed += run->mmio.len;
1521 vcpu->arch.mmio_vmx_copy_nums--;
1522 vcpu->arch.mmio_vmx_offset++;
1528 int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1530 union kvmppc_one_reg reg;
1535 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1537 if (vmx_offset == -1)
1540 reg.vval = VCPU_VSX_VR(vcpu, index);
1541 *val = reg.vsxval[vmx_offset];
1546 int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1548 union kvmppc_one_reg reg;
1553 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1555 if (vmx_offset == -1)
1558 reg.vval = VCPU_VSX_VR(vcpu, index);
1559 *val = reg.vsx32val[vmx_offset];
1564 int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1566 union kvmppc_one_reg reg;
1571 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1573 if (vmx_offset == -1)
1576 reg.vval = VCPU_VSX_VR(vcpu, index);
1577 *val = reg.vsx16val[vmx_offset];
1582 int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1584 union kvmppc_one_reg reg;
1589 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1591 if (vmx_offset == -1)
1594 reg.vval = VCPU_VSX_VR(vcpu, index);
1595 *val = reg.vsx8val[vmx_offset];
1600 int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1601 unsigned int rs, unsigned int bytes, int is_default_endian)
1604 unsigned int index = rs & KVM_MMIO_REG_MASK;
1605 enum emulation_result emulated = EMULATE_DONE;
if (vcpu->arch.mmio_vmx_copy_nums > 2)
1608 return EMULATE_FAIL;
1610 vcpu->arch.io_gpr = rs;
1612 while (vcpu->arch.mmio_vmx_copy_nums) {
1613 switch (vcpu->arch.mmio_copy_type) {
1614 case KVMPPC_VMX_COPY_DWORD:
1615 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1616 return EMULATE_FAIL;
1619 case KVMPPC_VMX_COPY_WORD:
1620 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1621 return EMULATE_FAIL;
1623 case KVMPPC_VMX_COPY_HWORD:
1624 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1625 return EMULATE_FAIL;
1627 case KVMPPC_VMX_COPY_BYTE:
1628 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1629 return EMULATE_FAIL;
1632 return EMULATE_FAIL;
1635 emulated = kvmppc_handle_store(run, vcpu, val, bytes,
1637 if (emulated != EMULATE_DONE)
1640 vcpu->arch.paddr_accessed += run->mmio.len;
1641 vcpu->arch.mmio_vmx_copy_nums--;
1642 vcpu->arch.mmio_vmx_offset++;
1648 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
1649 struct kvm_run *run)
1651 enum emulation_result emulated = EMULATE_FAIL;
1654 vcpu->arch.paddr_accessed += run->mmio.len;
1656 if (!vcpu->mmio_is_write) {
1657 emulated = kvmppc_handle_vmx_load(run, vcpu,
1658 vcpu->arch.io_gpr, run->mmio.len, 1);
1660 emulated = kvmppc_handle_vmx_store(run, vcpu,
1661 vcpu->arch.io_gpr, run->mmio.len, 1);
1665 case EMULATE_DO_MMIO:
1666 run->exit_reason = KVM_EXIT_MMIO;
1670 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1671 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1672 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1681 #endif /* CONFIG_ALTIVEC */
1683 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1686 union kvmppc_one_reg val;
1689 size = one_reg_size(reg->id);
1690 if (size > sizeof(val))
1693 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1697 #ifdef CONFIG_ALTIVEC
1698 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1699 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1703 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1705 case KVM_REG_PPC_VSCR:
1706 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1710 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1712 case KVM_REG_PPC_VRSAVE:
1713 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1715 #endif /* CONFIG_ALTIVEC */
1725 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1731 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1734 union kvmppc_one_reg val;
1737 size = one_reg_size(reg->id);
1738 if (size > sizeof(val))
1741 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1744 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1748 #ifdef CONFIG_ALTIVEC
1749 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1750 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1754 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1756 case KVM_REG_PPC_VSCR:
1757 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1761 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1763 case KVM_REG_PPC_VRSAVE:
1764 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1768 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1770 #endif /* CONFIG_ALTIVEC */
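/*
 * kvm_arch_vcpu_ioctl_run() is the KVM_RUN entry point.  Before re-entering
 * the guest it completes whatever the previous exit left pending: an MMIO
 * load result, the remaining elements of a multi-element VSX or VMX access,
 * OSI or PAPR hypercall return values, or an EPR value supplied by
 * userspace.
 */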
1780 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
1786 if (vcpu->mmio_needed) {
1787 vcpu->mmio_needed = 0;
1788 if (!vcpu->mmio_is_write)
1789 kvmppc_complete_mmio_load(vcpu, run);
1791 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1792 vcpu->arch.mmio_vsx_copy_nums--;
1793 vcpu->arch.mmio_vsx_offset++;
1796 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1797 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
1798 if (r == RESUME_HOST) {
1799 vcpu->mmio_needed = 1;
1804 #ifdef CONFIG_ALTIVEC
1805 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1806 vcpu->arch.mmio_vmx_copy_nums--;
1807 vcpu->arch.mmio_vmx_offset++;
1810 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1811 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
1812 if (r == RESUME_HOST) {
1813 vcpu->mmio_needed = 1;
1818 } else if (vcpu->arch.osi_needed) {
1819 u64 *gprs = run->osi.gprs;
1822 for (i = 0; i < 32; i++)
1823 kvmppc_set_gpr(vcpu, i, gprs[i]);
1824 vcpu->arch.osi_needed = 0;
1825 } else if (vcpu->arch.hcall_needed) {
1828 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1829 for (i = 0; i < 9; ++i)
1830 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1831 vcpu->arch.hcall_needed = 0;
1833 } else if (vcpu->arch.epr_needed) {
1834 kvmppc_set_epr(vcpu, run->epr.epr);
1835 vcpu->arch.epr_needed = 0;
1839 kvm_sigset_activate(vcpu);
1841 if (run->immediate_exit)
1844 r = kvmppc_vcpu_run(run, vcpu);
1846 kvm_sigset_deactivate(vcpu);
1848 #ifdef CONFIG_ALTIVEC
1855 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1857 if (irq->irq == KVM_INTERRUPT_UNSET) {
1858 kvmppc_core_dequeue_external(vcpu);
1862 kvmppc_core_queue_external(vcpu, irq);
1864 kvm_vcpu_kick(vcpu);
1869 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1870 struct kvm_enable_cap *cap)
1878 case KVM_CAP_PPC_OSI:
1880 vcpu->arch.osi_enabled = true;
1882 case KVM_CAP_PPC_PAPR:
1884 vcpu->arch.papr_enabled = true;
1886 case KVM_CAP_PPC_EPR:
1889 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1891 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1894 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1896 vcpu->arch.watchdog_enabled = true;
1899 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1900 case KVM_CAP_SW_TLB: {
1901 struct kvm_config_tlb cfg;
1902 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1905 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1908 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1912 #ifdef CONFIG_KVM_MPIC
1913 case KVM_CAP_IRQ_MPIC: {
1915 struct kvm_device *dev;
1918 f = fdget(cap->args[0]);
1923 dev = kvm_device_from_filp(f.file);
1925 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1931 #ifdef CONFIG_KVM_XICS
1932 case KVM_CAP_IRQ_XICS: {
1934 struct kvm_device *dev;
1937 f = fdget(cap->args[0]);
1942 dev = kvm_device_from_filp(f.file);
1945 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1947 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1953 #endif /* CONFIG_KVM_XICS */
1954 #ifdef CONFIG_KVM_XIVE
1955 case KVM_CAP_PPC_IRQ_XIVE: {
1957 struct kvm_device *dev;
1960 f = fdget(cap->args[0]);
1965 if (!xive_enabled())
1969 dev = kvm_device_from_filp(f.file);
1971 r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
1977 #endif /* CONFIG_KVM_XIVE */
1978 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1979 case KVM_CAP_PPC_FWNMI:
1981 if (!is_kvmppc_hv_enabled(vcpu->kvm))
1984 vcpu->kvm->arch.fwnmi_enabled = true;
1986 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1993 r = kvmppc_sanity_check(vcpu);
1998 bool kvm_arch_intc_initialized(struct kvm *kvm)
2000 #ifdef CONFIG_KVM_MPIC
2004 #ifdef CONFIG_KVM_XICS
2005 if (kvm->arch.xics || kvm->arch.xive)
2011 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2012 struct kvm_mp_state *mp_state)
2017 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2018 struct kvm_mp_state *mp_state)
2023 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2024 unsigned int ioctl, unsigned long arg)
2026 struct kvm_vcpu *vcpu = filp->private_data;
2027 void __user *argp = (void __user *)arg;
2029 if (ioctl == KVM_INTERRUPT) {
2030 struct kvm_interrupt irq;
2031 if (copy_from_user(&irq, argp, sizeof(irq)))
2033 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2035 return -ENOIOCTLCMD;
2038 long kvm_arch_vcpu_ioctl(struct file *filp,
2039 unsigned int ioctl, unsigned long arg)
2041 struct kvm_vcpu *vcpu = filp->private_data;
2042 void __user *argp = (void __user *)arg;
2046 case KVM_ENABLE_CAP:
2048 struct kvm_enable_cap cap;
2051 if (copy_from_user(&cap, argp, sizeof(cap)))
2053 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2058 case KVM_SET_ONE_REG:
2059 case KVM_GET_ONE_REG:
2061 struct kvm_one_reg reg;
if (copy_from_user(&reg, argp, sizeof(reg)))
if (ioctl == KVM_SET_ONE_REG)
r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2072 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2073 case KVM_DIRTY_TLB: {
2074 struct kvm_dirty_tlb dirty;
2077 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2079 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2092 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2094 return VM_FAULT_SIGBUS;
2097 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2099 u32 inst_nop = 0x60000000;
2100 #ifdef CONFIG_KVM_BOOKE_HV
2101 u32 inst_sc1 = 0x44000022;
2102 pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2103 pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2104 pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2105 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2107 u32 inst_lis = 0x3c000000;
2108 u32 inst_ori = 0x60000000;
2109 u32 inst_sc = 0x44000002;
2110 u32 inst_imm_mask = 0xffff;
/*
 * The hypercall to get into KVM from within guest context is as
 * follows:
 *
 *    lis r0, KVM_SC_MAGIC_R0@h
 *    ori r0, r0, KVM_SC_MAGIC_R0@l
 *    sc
 *    nop
 */
2121 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2122 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2123 pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2124 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2127 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
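/*
 * Userspace retrieves this structure with the KVM_PPC_GET_PVINFO VM ioctl
 * (handled in kvm_arch_vm_ioctl() below) and typically advertises the
 * hypercall sequence to the guest.  A minimal sketch (hypothetical
 * descriptor name vm_fd):
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *	ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo);
 */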
2132 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2135 if (!irqchip_in_kernel(kvm))
2138 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2139 irq_event->irq, irq_event->level,
2145 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2146 struct kvm_enable_cap *cap)
2154 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2155 case KVM_CAP_PPC_ENABLE_HCALL: {
2156 unsigned long hcall = cap->args[0];
2159 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2162 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2165 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2167 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2171 case KVM_CAP_PPC_SMT: {
2172 unsigned long mode = cap->args[0];
2173 unsigned long flags = cap->args[1];
2176 if (kvm->arch.kvm_ops->set_smt_mode)
2177 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2181 case KVM_CAP_PPC_NESTED_HV:
2183 if (!is_kvmppc_hv_enabled(kvm) ||
2184 !kvm->arch.kvm_ops->enable_nested)
2186 r = kvm->arch.kvm_ops->enable_nested(kvm);
2197 #ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
2206 #ifdef CONFIG_PPC_PSERIES
2207 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2209 struct h_cpu_char_result c;
2212 if (!machine_is(pseries))
2215 rc = plpar_get_cpu_characteristics(&c);
2216 if (rc == H_SUCCESS) {
2217 cp->character = c.character;
2218 cp->behaviour = c.behaviour;
2219 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2220 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2221 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2222 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2223 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2224 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2225 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2226 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2227 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2228 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2229 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2230 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2231 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2236 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2242 static inline bool have_fw_feat(struct device_node *fw_features,
2243 const char *state, const char *name)
2245 struct device_node *np;
2248 np = of_get_child_by_name(fw_features, name);
2250 r = of_property_read_bool(np, state);
2256 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2258 struct device_node *np, *fw_features;
2261 memset(cp, 0, sizeof(*cp));
2262 r = pseries_get_cpu_char(cp);
2266 np = of_find_node_by_name(NULL, "ibm,opal");
2268 fw_features = of_get_child_by_name(np, "fw-features");
2272 if (have_fw_feat(fw_features, "enabled",
2273 "inst-spec-barrier-ori31,31,0"))
2274 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2275 if (have_fw_feat(fw_features, "enabled",
2276 "fw-bcctrl-serialized"))
2277 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2278 if (have_fw_feat(fw_features, "enabled",
2279 "inst-l1d-flush-ori30,30,0"))
2280 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2281 if (have_fw_feat(fw_features, "enabled",
2282 "inst-l1d-flush-trig2"))
2283 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2284 if (have_fw_feat(fw_features, "enabled",
2285 "fw-l1d-thread-split"))
2286 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2287 if (have_fw_feat(fw_features, "enabled",
2288 "fw-count-cache-disabled"))
2289 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2290 if (have_fw_feat(fw_features, "enabled",
2291 "fw-count-cache-flush-bcctr2,0,0"))
2292 cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2293 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2294 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2295 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2296 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2297 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2298 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2299 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2301 if (have_fw_feat(fw_features, "enabled",
2302 "speculation-policy-favor-security"))
2303 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2304 if (!have_fw_feat(fw_features, "disabled",
2305 "needs-l1d-flush-msr-pr-0-to-1"))
2306 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2307 if (!have_fw_feat(fw_features, "disabled",
2308 "needs-spec-barrier-for-bound-checks"))
2309 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2310 if (have_fw_feat(fw_features, "enabled",
2311 "needs-count-cache-flush-on-context-switch"))
2312 cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2313 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2314 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2315 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2316 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2318 of_node_put(fw_features);
2325 long kvm_arch_vm_ioctl(struct file *filp,
2326 unsigned int ioctl, unsigned long arg)
2328 struct kvm *kvm __maybe_unused = filp->private_data;
2329 void __user *argp = (void __user *)arg;
2333 case KVM_PPC_GET_PVINFO: {
2334 struct kvm_ppc_pvinfo pvinfo;
2335 memset(&pvinfo, 0, sizeof(pvinfo));
2336 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2337 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2344 #ifdef CONFIG_SPAPR_TCE_IOMMU
2345 case KVM_CREATE_SPAPR_TCE_64: {
2346 struct kvm_create_spapr_tce_64 create_tce_64;
2349 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2351 if (create_tce_64.flags) {
2355 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2358 case KVM_CREATE_SPAPR_TCE: {
2359 struct kvm_create_spapr_tce create_tce;
2360 struct kvm_create_spapr_tce_64 create_tce_64;
2363 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2366 create_tce_64.liobn = create_tce.liobn;
2367 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2368 create_tce_64.offset = 0;
2369 create_tce_64.size = create_tce.window_size >>
2370 IOMMU_PAGE_SHIFT_4K;
2371 create_tce_64.flags = 0;
2372 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2376 #ifdef CONFIG_PPC_BOOK3S_64
2377 case KVM_PPC_GET_SMMU_INFO: {
2378 struct kvm_ppc_smmu_info info;
2379 struct kvm *kvm = filp->private_data;
2381 memset(&info, 0, sizeof(info));
2382 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2383 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2387 case KVM_PPC_RTAS_DEFINE_TOKEN: {
2388 struct kvm *kvm = filp->private_data;
2390 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2393 case KVM_PPC_CONFIGURE_V3_MMU: {
2394 struct kvm *kvm = filp->private_data;
2395 struct kvm_ppc_mmuv3_cfg cfg;
2398 if (!kvm->arch.kvm_ops->configure_mmu)
2401 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2403 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2406 case KVM_PPC_GET_RMMU_INFO: {
2407 struct kvm *kvm = filp->private_data;
2408 struct kvm_ppc_rmmu_info info;
2411 if (!kvm->arch.kvm_ops->get_rmmu_info)
2413 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2414 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2418 case KVM_PPC_GET_CPU_CHAR: {
2419 struct kvm_ppc_cpu_char cpuchar;
2421 r = kvmppc_get_cpu_char(&cpuchar);
2422 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2427 struct kvm *kvm = filp->private_data;
2428 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2430 #else /* CONFIG_PPC_BOOK3S_64 */
2439 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
2440 static unsigned long nr_lpids;
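/*
 * Simple bitmap allocator for hardware logical partition IDs (LPIDs).
 * kvmppc_init_lpid() sizes the pool from the platform limit,
 * kvmppc_alloc_lpid() and kvmppc_free_lpid() hand IDs out and reclaim
 * them, and kvmppc_claim_lpid() reserves a specific ID so that it is
 * never handed out.
 */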
2442 long kvmppc_alloc_lpid(void)
2447 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
2448 if (lpid >= nr_lpids) {
2449 pr_err("%s: No LPIDs free\n", __func__);
2452 } while (test_and_set_bit(lpid, lpid_inuse));
2456 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2458 void kvmppc_claim_lpid(long lpid)
2460 set_bit(lpid, lpid_inuse);
2462 EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
2464 void kvmppc_free_lpid(long lpid)
2466 clear_bit(lpid, lpid_inuse);
2468 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2470 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2472 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
2473 memset(lpid_inuse, 0, sizeof(lpid_inuse));
2475 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2477 int kvm_arch_init(void *opaque)
2482 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);