// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world. Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
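/*
 * A minimal sketch of the requester side that the smp_mb() above pairs
 * with (illustrative only; both helpers are the generic KVM ones used
 * elsewhere in this file):
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);	// IPIs the vcpu only if it is IN_GUEST_MODE
 *
 * If the target vcpu is still OUTSIDE_GUEST_MODE no IPI is sent; the
 * request is instead picked up by the kvm_request_pending() check in
 * kvmppc_prepare_to_enter() before the guest is entered.
 */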
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_halt(vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
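/*
 * Illustrative guest-side sequence for reaching kvmppc_kvm_pv() (the
 * same instruction pattern kvm_vm_ioctl_get_pvinfo() advertises further
 * down; register usage follows the code above: hypercall number in r11,
 * parameters in r3..r6, status returned in r3 and a second value in r4):
 *
 *	lis	r0, KVM_SC_MAGIC_R0@h
 *	ori	r0, r0, KVM_SC_MAGIC_R0@l
 *	// r11 = hypercall token, r3..r6 = parameters
 *	sc
 */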
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		ppc_inst_t last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
				      ppc_inst_val(last_inst));

		/*
		 * Injecting a Data Storage here is a bit more
		 * accurate since the instruction that caused the
		 * access could still be a valid one.
		 */
		if (!IS_ENABLED(CONFIG_BOOKE)) {
			ulong dsisr = DSISR_BADACCESS;

			if (vcpu->mmio_is_write)
				dsisr |= DSISR_ISSTORE;

			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
		} else {
			/*
			 * BookE does not send a SIGBUS on a bad
			 * fault, so use a Program interrupt instead
			 * to avoid a fault loop.
			 */
			kvmppc_core_queue_program(vcpu, 0);
		}

		r = RESUME_GUEST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
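/*
 * A hedged sketch of the userspace half of the KVM_EXIT_MMIO protocol
 * that EMULATE_DO_MMIO hands off to (standard KVM API; device_read()
 * and device_write() are hypothetical stand-ins for the VMM's device
 * model):
 *
 *	struct kvm_run *run = ...;	// mmap()ed from the vcpu fd
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		if (run->mmio.is_write)
 *			device_write(run->mmio.phys_addr,
 *				     run->mmio.data, run->mmio.len);
 *		else
 *			device_read(run->mmio.phys_addr,
 *				    run->mmio.data, run->mmio.len);
 *		// the next KVM_RUN completes a pending load via
 *		// kvmppc_complete_mmio_load()
 *	}
 */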
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	kvm_vcpu_srcu_read_lock(vcpu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	int r;

	/*
	 * If both HV and PR are enabled, the default is HV.
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (!try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	r = kvmppc_core_init_vm(kvm);
	if (r)
		module_put(kvm_ops->owner);
	return r;
err_out:
	return -EINVAL;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free the vcpu structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_destroy_vcpus(kvm);

	mutex_lock(&kvm->lock);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (which
		 * implies a POWER9 processor) and the PowerNV platform,
		 * as nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
		       kvmppc_hv_ops->hash_v3_possible());
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
		else
			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
		       !kvmppc_hv_ops->enable_dawr1(NULL));
		break;
	case KVM_CAP_PPC_RPT_INVALIDATE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_AIL_MODE_3:
		r = 0;
		/*
		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
		 * The POWER9s can support it if the guest runs in hash mode,
		 * but QEMU doesn't necessarily query the capability in time.
		 */
		if (hv_enabled) {
			if (kvmhv_on_pseries()) {
				if (pseries_reloc_on_exception())
					r = 1;
			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
				r = 1;
			}
		}
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
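/*
 * Userspace probes these capabilities with the KVM_CHECK_EXTENSION
 * ioctl; a minimal sketch (standard KVM API, names as in <linux/kvm.h>):
 *
 *	int smt = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *
 * Issued on a VM fd the answer reflects that VM's type (the "if (kvm)"
 * path above); on the /dev/kvm fd it falls back to the HV-module guess.
 */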
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	rcuwait_init(&vcpu->arch.wait);
	vcpu->arch.waitp = &vcpu->arch.wait;
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}

static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
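/*
 * The asm blocks above let the FPU do the format conversion: lfs loads a
 * single-precision value (converting to double in fr0) and stfd stores
 * the double back out; dp_to_sp() does the reverse. A worked example,
 * assuming IEEE 754 encodings:
 *
 *	sp_to_dp(0x3f800000)            == 0x3ff0000000000000	(1.0f -> 1.0)
 *	dp_to_sp(0x4000000000000000ULL) == 0x40000000		(2.0  -> 2.0f)
 */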
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 gpr;

	if (run->mmio.len > sizeof(gpr))
		return;

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}
static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	struct kvm_run *run = vcpu->run;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
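/*
 * Note the deferred-completion contract here: on EMULATE_DO_MMIO the
 * target register is only recorded (io_gpr, mmio_host_swabbed,
 * mmio_sign_extend above); the value is written back later by
 * kvmppc_complete_mmio_load(), called from kvm_arch_vcpu_ioctl_run()
 * once userspace has filled run->mmio.data. A sketch of the round trip:
 *
 *	__kvmppc_handle_load() -> EMULATE_DO_MMIO
 *	KVM_RUN exits with KVM_EXIT_MMIO; userspace fills run->mmio.data
 *	next KVM_RUN -> kvm_arch_vcpu_ioctl_run()
 *		     -> kvmppc_complete_mmio_load()
 */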
int kvmppc_handle_load(struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}
#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */
int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
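/*
 * The "logical XOR" the comments above lament can be written as an
 * equality test on normalized booleans; a sketch equivalent to the
 * if/else in both __kvmppc_handle_load() and kvmppc_handle_store():
 *
 *	bool host_swabbed = (!!kvmppc_need_byteswap(vcpu) ==
 *			     !!is_default_endian);
 */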
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}
int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */
1525 #ifdef CONFIG_ALTIVEC
1526 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1527 unsigned int rt, unsigned int bytes, int is_default_endian)
1529 enum emulation_result emulated = EMULATE_DONE;
1531 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1532 return EMULATE_FAIL;
1534 while (vcpu->arch.mmio_vmx_copy_nums) {
1535 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1536 is_default_endian, 0);
1538 if (emulated != EMULATE_DONE)
1541 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1542 vcpu->arch.mmio_vmx_copy_nums--;
1543 vcpu->arch.mmio_vmx_offset++;
static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}
int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}
static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */
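/*
 * The copy_nums/offset pairs above implement repeat-style emulation: a
 * 16-byte Altivec access, for example, is split into at most two 8-byte
 * MMIO rounds. A sketch of the flow for a 16-byte load:
 *
 *	mmio_vmx_copy_nums = 2, mmio_vmx_offset = 0
 *	-> first round exits to userspace for bytes 0..7
 *	-> kvm_arch_vcpu_ioctl_run() consumes the result, decrements
 *	   copy_nums, bumps the offset, and re-enters
 *	   kvmppc_emulate_mmio_vmx_loadstore() for bytes 8..15
 */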
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif

	/*
	 * We're already returning to userspace, don't pass the
	 * RESUME_HOST flags along.
	 */
	if (r > 0)
		r = 0;

	vcpu_put(vcpu);
	return r;
}
1887 if (irq->irq == KVM_INTERRUPT_UNSET) {
1888 kvmppc_core_dequeue_external(vcpu);
1892 kvmppc_core_queue_external(vcpu, irq);
1894 kvm_vcpu_kick(vcpu);
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled()) {
			fdput(f);
			break;
		}

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
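/*
 * Userspace reaches this via the KVM_ENABLE_CAP vcpu ioctl; a minimal
 * sketch (standard KVM API):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_PAPR,
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * The kvmppc_sanity_check() call above then revalidates the resulting
 * configuration (e.g. PAPR mode requires a Book3S-64 CPU type).
 */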
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
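/*
 * A hedged usage sketch: the VMM fetches this blob once and typically
 * exposes it to the guest, e.g. through the ePAPR "hcall-instructions"
 * device-tree property:
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) == 0) {
 *		// pvinfo.hcall[] now holds the four-instruction
 *		// hypercall sequence built above (big-endian words)
 *	}
 */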
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	int ret = 0;

#ifdef CONFIG_KVM_MPIC
	ret = ret || (kvm->arch.mpic != NULL);
#endif
#ifdef CONFIG_KVM_XICS
	ret = ret || (kvm->arch.xics != NULL);
	ret = ret || (kvm->arch.xive != NULL);
#endif
	smp_rmb();
	return ret;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!kvm_arch_irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
			break;
		r = kvm->arch.kvm_ops->enable_svm(kvm);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
			break;
		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
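/*
 * Example (sketch) of toggling an sPAPR hypercall from userspace through
 * the KVM_CAP_PPC_ENABLE_HCALL path above; args[0] is the hcall token,
 * args[1] is 1 to enable or 0 to disable:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args = { H_CEDE, 1 },
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */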
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}
static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}
static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	int lpid;

	/* The host LPID must always be 0 (allocation starts at 1) */
	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
	if (lpid < 0) {
		if (lpid == -ENOMEM)
			pr_err("%s: Out of memory\n", __func__);
		else
			pr_err("%s: No LPIDs free\n", __func__);
		return -ENOMEM;
	}

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_free_lpid(long lpid)
{
	ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
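/*
 * Allocator semantics in one example, assuming the subarch called
 * kvmppc_init_lpid(16) at init time: LPID 0 stays reserved for the
 * host, so kvmppc_alloc_lpid() hands out 1..15 and kvmppc_free_lpid()
 * returns them:
 *
 *	long lpid = kvmppc_alloc_lpid();	// e.g. 1
 *	...
 *	kvmppc_free_lpid(lpid);
 */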
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);

void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}

int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	if (kvm->arch.kvm_ops->create_vm_debugfs)
		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
	return 0;
}