linux-2.6-microblaze.git: arch/powerpc/kvm/powerpc.c (01d0f9935e6c2372fb814d97bd63ae8113eecf18)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright IBM Corp. 2007
5  *
6  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
7  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
8  */
9
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/vmalloc.h>
14 #include <linux/hrtimer.h>
15 #include <linux/sched/signal.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/file.h>
19 #include <linux/module.h>
20 #include <linux/irqbypass.h>
21 #include <linux/kvm_irqfd.h>
22 #include <linux/of.h>
23 #include <asm/cputable.h>
24 #include <linux/uaccess.h>
25 #include <asm/kvm_ppc.h>
26 #include <asm/cputhreads.h>
27 #include <asm/irqflags.h>
28 #include <asm/iommu.h>
29 #include <asm/switch_to.h>
30 #include <asm/xive.h>
31 #ifdef CONFIG_PPC_PSERIES
32 #include <asm/hvcall.h>
33 #include <asm/plpar_wrappers.h>
34 #endif
35 #include <asm/ultravisor.h>
36 #include <asm/setup.h>
37
38 #include "timing.h"
39 #include "../mm/mmu_decl.h"
40
41 #define CREATE_TRACE_POINTS
42 #include "trace.h"
43
44 struct kvmppc_ops *kvmppc_hv_ops;
45 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
46 struct kvmppc_ops *kvmppc_pr_ops;
47 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
48
49
50 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
51 {
52         return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
53 }
54
55 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
56 {
57         return kvm_arch_vcpu_runnable(vcpu);
58 }
59
60 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
61 {
62         return false;
63 }
64
65 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
66 {
67         return 1;
68 }
69
70 /*
71  * Common checks before entering the guest world.  Call with interrupts
72  * enabled; interrupts are hard-disabled here (note the WARN_ON below).
73  *
74  * returns:
75  *
76  * == 1 if we're ready to go into guest state
77  * <= 0 if we need to go back to the host with return value
78  */
79 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
80 {
81         int r;
82
83         WARN_ON(irqs_disabled());
84         hard_irq_disable();
85
86         while (true) {
87                 if (need_resched()) {
88                         local_irq_enable();
89                         cond_resched();
90                         hard_irq_disable();
91                         continue;
92                 }
93
94                 if (signal_pending(current)) {
95                         kvmppc_account_exit(vcpu, SIGNAL_EXITS);
96                         vcpu->run->exit_reason = KVM_EXIT_INTR;
97                         r = -EINTR;
98                         break;
99                 }
100
101                 vcpu->mode = IN_GUEST_MODE;
102
103                 /*
104                  * Reading vcpu->requests must happen after setting vcpu->mode,
105                  * so we don't miss a request because the requester sees
106                  * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
107                  * before next entering the guest (and thus doesn't IPI).
108                  * This also orders the write to mode from any reads
109                  * to the page tables done while the VCPU is running.
110                  * Please see the comment in kvm_flush_remote_tlbs.
111                  */
112                 smp_mb();
113
114                 if (kvm_request_pending(vcpu)) {
115                         /* Make sure we process requests preemptibly */
116                         local_irq_enable();
117                         trace_kvm_check_requests(vcpu);
118                         r = kvmppc_core_check_requests(vcpu);
119                         hard_irq_disable();
120                         if (r > 0)
121                                 continue;
122                         break;
123                 }
124
125                 if (kvmppc_core_prepare_to_enter(vcpu)) {
126                         /* Interrupts got enabled in between, so we
127                          * are back at square one. */
128                         continue;
129                 }
130
131                 guest_enter_irqoff();
132                 return 1;
133         }
134
135         /* return to host */
136         local_irq_enable();
137         return r;
138 }
139 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
140
141 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
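/*
 * The guest's shared (magic) page is kept in the guest's current
 * endianness.  When the guest switches endianness we byte-swap every
 * field so its existing contents stay meaningful (see the
 * MAP_MAGIC_PAGE hypercall handler below).
 */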
142 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
143 {
144         struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
145         int i;
146
147         shared->sprg0 = swab64(shared->sprg0);
148         shared->sprg1 = swab64(shared->sprg1);
149         shared->sprg2 = swab64(shared->sprg2);
150         shared->sprg3 = swab64(shared->sprg3);
151         shared->srr0 = swab64(shared->srr0);
152         shared->srr1 = swab64(shared->srr1);
153         shared->dar = swab64(shared->dar);
154         shared->msr = swab64(shared->msr);
155         shared->dsisr = swab32(shared->dsisr);
156         shared->int_pending = swab32(shared->int_pending);
157         for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
158                 shared->sr[i] = swab32(shared->sr[i]);
159 }
160 #endif
161
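/*
 * Handle a KVM/ePAPR-style hypercall from the guest.  The hcall token is
 * taken from r11 and up to four arguments from r3-r6; the EV_* status is
 * returned to the caller and a second return value is placed in r4.
 */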
162 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
163 {
164         int nr = kvmppc_get_gpr(vcpu, 11);
165         int r;
166         unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
167         unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
168         unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
169         unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
170         unsigned long r2 = 0;
171
172         if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
173                 /* 32 bit mode */
174                 param1 &= 0xffffffff;
175                 param2 &= 0xffffffff;
176                 param3 &= 0xffffffff;
177                 param4 &= 0xffffffff;
178         }
179
180         switch (nr) {
181         case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
182         {
183 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
184                 /* Book3S can be little endian, find it out here */
185                 int shared_big_endian = true;
186                 if (vcpu->arch.intr_msr & MSR_LE)
187                         shared_big_endian = false;
188                 if (shared_big_endian != vcpu->arch.shared_big_endian)
189                         kvmppc_swab_shared(vcpu);
190                 vcpu->arch.shared_big_endian = shared_big_endian;
191 #endif
192
193                 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
194                         /*
195                          * Older versions of the Linux magic page code had
196                          * a bug where they would map their trampoline code
197                          * NX. If that's the case, remove !PR NX capability.
198                          */
199                         vcpu->arch.disable_kernel_nx = true;
200                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
201                 }
202
203                 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
204                 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
205
206 #ifdef CONFIG_PPC_64K_PAGES
207                 /*
208                  * Make sure our 4k magic page is in the same window of a 64k
209                  * page within the guest and within the host's page.
210                  */
211                 if ((vcpu->arch.magic_page_pa & 0xf000) !=
212                     ((ulong)vcpu->arch.shared & 0xf000)) {
213                         void *old_shared = vcpu->arch.shared;
214                         ulong shared = (ulong)vcpu->arch.shared;
215                         void *new_shared;
216
217                         shared &= PAGE_MASK;
218                         shared |= vcpu->arch.magic_page_pa & 0xf000;
219                         new_shared = (void*)shared;
220                         memcpy(new_shared, old_shared, 0x1000);
221                         vcpu->arch.shared = new_shared;
222                 }
223 #endif
224
225                 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
226
227                 r = EV_SUCCESS;
228                 break;
229         }
230         case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
231                 r = EV_SUCCESS;
232 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
233                 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
234 #endif
235
236                 /* Second return value is in r4 */
237                 break;
238         case EV_HCALL_TOKEN(EV_IDLE):
239                 r = EV_SUCCESS;
240                 kvm_vcpu_halt(vcpu);
241                 break;
242         default:
243                 r = EV_UNIMPLEMENTED;
244                 break;
245         }
246
247         kvmppc_set_gpr(vcpu, 4, r2);
248
249         return r;
250 }
251 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
252
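/*
 * Validate that the vCPU configuration is runnable and record the result
 * in vcpu->arch.sane.
 */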
253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
254 {
255         int r = false;
256
257         /* We have to know what CPU to virtualize */
258         if (!vcpu->arch.pvr)
259                 goto out;
260
261         /* PAPR only works with book3s_64 */
262         if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
263                 goto out;
264
265         /* HV KVM can only do PAPR mode for now */
266         if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
267                 goto out;
268
269 #ifdef CONFIG_KVM_BOOKE_HV
270         if (!cpu_has_feature(CPU_FTR_EMB_HV))
271                 goto out;
272 #endif
273
274         r = true;
275
276 out:
277         vcpu->arch.sane = r;
278         return r ? 0 : -EINVAL;
279 }
280 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
281
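/*
 * Emulate a load/store that faulted on emulated (device) memory.  On
 * success the guest is resumed; EMULATE_DO_MMIO exits to userspace with
 * KVM_EXIT_MMIO so it can complete the access.
 */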
282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
283 {
284         enum emulation_result er;
285         int r;
286
287         er = kvmppc_emulate_loadstore(vcpu);
288         switch (er) {
289         case EMULATE_DONE:
290                 /* Future optimization: only reload non-volatiles if they were
291                  * actually modified. */
292                 r = RESUME_GUEST_NV;
293                 break;
294         case EMULATE_AGAIN:
295                 r = RESUME_GUEST;
296                 break;
297         case EMULATE_DO_MMIO:
298                 vcpu->run->exit_reason = KVM_EXIT_MMIO;
299                 /* We must reload nonvolatiles because "update" load/store
300                  * instructions modify register state. */
301                 /* Future optimization: only reload non-volatiles if they were
302                  * actually modified. */
303                 r = RESUME_HOST_NV;
304                 break;
305         case EMULATE_FAIL:
306         {
307                 u32 last_inst;
308
309                 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
310                 kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
311                                       last_inst);
312
313                 /*
314                  * Injecting a Data Storage here is a bit more
315                  * accurate since the instruction that caused the
316                  * access could still be a valid one.
317                  */
318                 if (!IS_ENABLED(CONFIG_BOOKE)) {
319                         ulong dsisr = DSISR_BADACCESS;
320
321                         if (vcpu->mmio_is_write)
322                                 dsisr |= DSISR_ISSTORE;
323
324                         kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
325                 } else {
326                         /*
327                          * BookE does not send a SIGBUS on a bad
328                          * fault, so use a Program interrupt instead
329                          * to avoid a fault loop.
330                          */
331                         kvmppc_core_queue_program(vcpu, 0);
332                 }
333
334                 r = RESUME_GUEST;
335                 break;
336         }
337         default:
338                 WARN_ON(1);
339                 r = RESUME_GUEST;
340         }
341
342         return r;
343 }
344 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
345
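/*
 * Store to a guest effective address: translate the EA, apply the magic
 * page override, and return EMULATE_DO_MMIO if the backing memory cannot
 * be written directly.
 */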
346 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
347               bool data)
348 {
349         ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
350         struct kvmppc_pte pte;
351         int r = -EINVAL;
352
353         vcpu->stat.st++;
354
355         if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
356                 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
357                                                             size);
358
359         if ((!r) || (r == -EAGAIN))
360                 return r;
361
362         r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
363                          XLATE_WRITE, &pte);
364         if (r < 0)
365                 return r;
366
367         *eaddr = pte.raddr;
368
369         if (!pte.may_write)
370                 return -EPERM;
371
372         /* Magic page override */
373         if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
374             ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
375             !(kvmppc_get_msr(vcpu) & MSR_PR)) {
376                 void *magic = vcpu->arch.shared;
377                 magic += pte.eaddr & 0xfff;
378                 memcpy(magic, ptr, size);
379                 return EMULATE_DONE;
380         }
381
382         if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
383                 return EMULATE_DO_MMIO;
384
385         return EMULATE_DONE;
386 }
387 EXPORT_SYMBOL_GPL(kvmppc_st);
388
389 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
390                       bool data)
391 {
392         ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
393         struct kvmppc_pte pte;
394         int rc = -EINVAL;
395
396         vcpu->stat.ld++;
397
398         if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
399                 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
400                                                               size);
401
402         if ((!rc) || (rc == -EAGAIN))
403                 return rc;
404
405         rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
406                           XLATE_READ, &pte);
407         if (rc)
408                 return rc;
409
410         *eaddr = pte.raddr;
411
412         if (!pte.may_read)
413                 return -EPERM;
414
415         if (!data && !pte.may_execute)
416                 return -ENOEXEC;
417
418         /* Magic page override */
419         if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
420             ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
421             !(kvmppc_get_msr(vcpu) & MSR_PR)) {
422                 void *magic = vcpu->arch.shared;
423                 magic += pte.eaddr & 0xfff;
424                 memcpy(ptr, magic, size);
425                 return EMULATE_DONE;
426         }
427
428         kvm_vcpu_srcu_read_lock(vcpu);
429         rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
430         kvm_vcpu_srcu_read_unlock(vcpu);
431         if (rc)
432                 return EMULATE_DO_MMIO;
433
434         return EMULATE_DONE;
435 }
436 EXPORT_SYMBOL_GPL(kvmppc_ld);
437
438 int kvm_arch_hardware_enable(void)
439 {
440         return 0;
441 }
442
443 int kvm_arch_check_processor_compat(void *opaque)
444 {
445         return 0;
446 }
447
448 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
449 {
450         struct kvmppc_ops *kvm_ops = NULL;
451         int r;
452
453         /*
454          * if we have both HV and PR enabled, default is HV
455          */
456         if (type == 0) {
457                 if (kvmppc_hv_ops)
458                         kvm_ops = kvmppc_hv_ops;
459                 else
460                         kvm_ops = kvmppc_pr_ops;
461                 if (!kvm_ops)
462                         goto err_out;
463         } else  if (type == KVM_VM_PPC_HV) {
464                 if (!kvmppc_hv_ops)
465                         goto err_out;
466                 kvm_ops = kvmppc_hv_ops;
467         } else if (type == KVM_VM_PPC_PR) {
468                 if (!kvmppc_pr_ops)
469                         goto err_out;
470                 kvm_ops = kvmppc_pr_ops;
471         } else
472                 goto err_out;
473
474         if (!try_module_get(kvm_ops->owner))
475                 return -ENOENT;
476
477         kvm->arch.kvm_ops = kvm_ops;
478         r = kvmppc_core_init_vm(kvm);
479         if (r)
480                 module_put(kvm_ops->owner);
481         return r;
482 err_out:
483         return -EINVAL;
484 }
485
486 void kvm_arch_destroy_vm(struct kvm *kvm)
487 {
488 #ifdef CONFIG_KVM_XICS
489         /*
490          * We call kick_all_cpus_sync() to ensure that all
491          * CPUs have executed any pending IPIs before we
492          * continue and free VCPUs structures below.
493          */
494         if (is_kvmppc_hv_enabled(kvm))
495                 kick_all_cpus_sync();
496 #endif
497
498         kvm_destroy_vcpus(kvm);
499
500         mutex_lock(&kvm->lock);
501
502         kvmppc_core_destroy_vm(kvm);
503
504         mutex_unlock(&kvm->lock);
505
506         /* drop the module reference */
507         module_put(kvm->arch.kvm_ops->owner);
508 }
509
510 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
511 {
512         int r;
513         /* Assume we're using HV mode when the HV module is loaded */
514         int hv_enabled = kvmppc_hv_ops ? 1 : 0;
515
516         if (kvm) {
517                 /*
518                  * Hooray - we know which VM type we're running on. Depend on
519                  * that rather than the guess above.
520                  */
521                 hv_enabled = is_kvmppc_hv_enabled(kvm);
522         }
523
524         switch (ext) {
525 #ifdef CONFIG_BOOKE
526         case KVM_CAP_PPC_BOOKE_SREGS:
527         case KVM_CAP_PPC_BOOKE_WATCHDOG:
528         case KVM_CAP_PPC_EPR:
529 #else
530         case KVM_CAP_PPC_SEGSTATE:
531         case KVM_CAP_PPC_HIOR:
532         case KVM_CAP_PPC_PAPR:
533 #endif
534         case KVM_CAP_PPC_UNSET_IRQ:
535         case KVM_CAP_PPC_IRQ_LEVEL:
536         case KVM_CAP_ENABLE_CAP:
537         case KVM_CAP_ONE_REG:
538         case KVM_CAP_IOEVENTFD:
539         case KVM_CAP_DEVICE_CTRL:
540         case KVM_CAP_IMMEDIATE_EXIT:
541         case KVM_CAP_SET_GUEST_DEBUG:
542                 r = 1;
543                 break;
544         case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
545         case KVM_CAP_PPC_PAIRED_SINGLES:
546         case KVM_CAP_PPC_OSI:
547         case KVM_CAP_PPC_GET_PVINFO:
548 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
549         case KVM_CAP_SW_TLB:
550 #endif
551                 /* We support this only for PR */
552                 r = !hv_enabled;
553                 break;
554 #ifdef CONFIG_KVM_MPIC
555         case KVM_CAP_IRQ_MPIC:
556                 r = 1;
557                 break;
558 #endif
559
560 #ifdef CONFIG_PPC_BOOK3S_64
561         case KVM_CAP_SPAPR_TCE:
562         case KVM_CAP_SPAPR_TCE_64:
563                 r = 1;
564                 break;
565         case KVM_CAP_SPAPR_TCE_VFIO:
566                 r = !!cpu_has_feature(CPU_FTR_HVMODE);
567                 break;
568         case KVM_CAP_PPC_RTAS:
569         case KVM_CAP_PPC_FIXUP_HCALL:
570         case KVM_CAP_PPC_ENABLE_HCALL:
571 #ifdef CONFIG_KVM_XICS
572         case KVM_CAP_IRQ_XICS:
573 #endif
574         case KVM_CAP_PPC_GET_CPU_CHAR:
575                 r = 1;
576                 break;
577 #ifdef CONFIG_KVM_XIVE
578         case KVM_CAP_PPC_IRQ_XIVE:
579                 /*
580                  * We need XIVE to be enabled on the platform (implies
581                  * a POWER9 processor) and the PowerNV platform, as
582                  * nested is not yet supported.
583                  */
584                 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
585                         kvmppc_xive_native_supported();
586                 break;
587 #endif
588
589         case KVM_CAP_PPC_ALLOC_HTAB:
590                 r = hv_enabled;
591                 break;
592 #endif /* CONFIG_PPC_BOOK3S_64 */
593 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
594         case KVM_CAP_PPC_SMT:
595                 r = 0;
596                 if (kvm) {
597                         if (kvm->arch.emul_smt_mode > 1)
598                                 r = kvm->arch.emul_smt_mode;
599                         else
600                                 r = kvm->arch.smt_mode;
601                 } else if (hv_enabled) {
602                         if (cpu_has_feature(CPU_FTR_ARCH_300))
603                                 r = 1;
604                         else
605                                 r = threads_per_subcore;
606                 }
607                 break;
608         case KVM_CAP_PPC_SMT_POSSIBLE:
609                 r = 1;
610                 if (hv_enabled) {
611                         if (!cpu_has_feature(CPU_FTR_ARCH_300))
612                                 r = ((threads_per_subcore << 1) - 1);
613                         else
614                                 /* P9 can emulate dbells, so allow any mode */
615                                 r = 8 | 4 | 2 | 1;
616                 }
617                 break;
618         case KVM_CAP_PPC_RMA:
619                 r = 0;
620                 break;
621         case KVM_CAP_PPC_HWRNG:
622                 r = kvmppc_hwrng_present();
623                 break;
624         case KVM_CAP_PPC_MMU_RADIX:
625                 r = !!(hv_enabled && radix_enabled());
626                 break;
627         case KVM_CAP_PPC_MMU_HASH_V3:
628                 r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
629                        kvmppc_hv_ops->hash_v3_possible());
630                 break;
631         case KVM_CAP_PPC_NESTED_HV:
632                 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
633                        !kvmppc_hv_ops->enable_nested(NULL));
634                 break;
635 #endif
636         case KVM_CAP_SYNC_MMU:
637 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
638                 r = hv_enabled;
639 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
640                 r = 1;
641 #else
642                 r = 0;
643 #endif
644                 break;
645 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
646         case KVM_CAP_PPC_HTAB_FD:
647                 r = hv_enabled;
648                 break;
649 #endif
650         case KVM_CAP_NR_VCPUS:
651                 /*
652                  * Recommending a number of CPUs is somewhat arbitrary; we
653                  * return the number of present CPUs for -HV (since a host
654                  * will have secondary threads "offline"), and for other KVM
655                  * implementations just count online CPUs.
656                  */
657                 if (hv_enabled)
658                         r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
659                 else
660                         r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
661                 break;
662         case KVM_CAP_MAX_VCPUS:
663                 r = KVM_MAX_VCPUS;
664                 break;
665         case KVM_CAP_MAX_VCPU_ID:
666                 r = KVM_MAX_VCPU_IDS;
667                 break;
668 #ifdef CONFIG_PPC_BOOK3S_64
669         case KVM_CAP_PPC_GET_SMMU_INFO:
670                 r = 1;
671                 break;
672         case KVM_CAP_SPAPR_MULTITCE:
673                 r = 1;
674                 break;
675         case KVM_CAP_SPAPR_RESIZE_HPT:
676                 r = !!hv_enabled;
677                 break;
678 #endif
679 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
680         case KVM_CAP_PPC_FWNMI:
681                 r = hv_enabled;
682                 break;
683 #endif
684 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
685         case KVM_CAP_PPC_HTM:
686                 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
687                      (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
688                 break;
689 #endif
690 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
691         case KVM_CAP_PPC_SECURE_GUEST:
692                 r = hv_enabled && kvmppc_hv_ops->enable_svm &&
693                         !kvmppc_hv_ops->enable_svm(NULL);
694                 break;
695         case KVM_CAP_PPC_DAWR1:
696                 r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
697                        !kvmppc_hv_ops->enable_dawr1(NULL));
698                 break;
699         case KVM_CAP_PPC_RPT_INVALIDATE:
700                 r = 1;
701                 break;
702 #endif
703         case KVM_CAP_PPC_AIL_MODE_3:
704                 r = 0;
705                 /*
706                  * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
707                  * The POWER9s can support it if the guest runs in hash mode,
708                  * but QEMU doesn't necessarily query the capability in time.
709                  */
710                 if (hv_enabled) {
711                         if (kvmhv_on_pseries()) {
712                                 if (pseries_reloc_on_exception())
713                                         r = 1;
714                         } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
715                                   !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
716                                 r = 1;
717                         }
718                 }
719                 break;
720         default:
721                 r = 0;
722                 break;
723         }
724         return r;
725
726 }
727
728 long kvm_arch_dev_ioctl(struct file *filp,
729                         unsigned int ioctl, unsigned long arg)
730 {
731         return -EINVAL;
732 }
733
734 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
735 {
736         kvmppc_core_free_memslot(kvm, slot);
737 }
738
739 int kvm_arch_prepare_memory_region(struct kvm *kvm,
740                                    const struct kvm_memory_slot *old,
741                                    struct kvm_memory_slot *new,
742                                    enum kvm_mr_change change)
743 {
744         return kvmppc_core_prepare_memory_region(kvm, old, new, change);
745 }
746
747 void kvm_arch_commit_memory_region(struct kvm *kvm,
748                                    struct kvm_memory_slot *old,
749                                    const struct kvm_memory_slot *new,
750                                    enum kvm_mr_change change)
751 {
752         kvmppc_core_commit_memory_region(kvm, old, new, change);
753 }
754
755 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
756                                    struct kvm_memory_slot *slot)
757 {
758         kvmppc_core_flush_memslot(kvm, slot);
759 }
760
761 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
762 {
763         return 0;
764 }
765
766 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
767 {
768         struct kvm_vcpu *vcpu;
769
770         vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
771         kvmppc_decrementer_func(vcpu);
772
773         return HRTIMER_NORESTART;
774 }
775
776 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
777 {
778         int err;
779
780         hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
781         vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
782
783 #ifdef CONFIG_KVM_EXIT_TIMING
784         mutex_init(&vcpu->arch.exit_timing_lock);
785 #endif
786         err = kvmppc_subarch_vcpu_init(vcpu);
787         if (err)
788                 return err;
789
790         err = kvmppc_core_vcpu_create(vcpu);
791         if (err)
792                 goto out_vcpu_uninit;
793
794         rcuwait_init(&vcpu->arch.wait);
795         vcpu->arch.waitp = &vcpu->arch.wait;
796         return 0;
797
798 out_vcpu_uninit:
799         kvmppc_subarch_vcpu_uninit(vcpu);
800         return err;
801 }
802
803 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
804 {
805 }
806
807 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
808 {
809         /* Make sure we're not using the vcpu anymore */
810         hrtimer_cancel(&vcpu->arch.dec_timer);
811
812         switch (vcpu->arch.irq_type) {
813         case KVMPPC_IRQ_MPIC:
814                 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
815                 break;
816         case KVMPPC_IRQ_XICS:
817                 if (xics_on_xive())
818                         kvmppc_xive_cleanup_vcpu(vcpu);
819                 else
820                         kvmppc_xics_free_icp(vcpu);
821                 break;
822         case KVMPPC_IRQ_XIVE:
823                 kvmppc_xive_native_cleanup_vcpu(vcpu);
824                 break;
825         }
826
827         kvmppc_core_vcpu_free(vcpu);
828
829         kvmppc_subarch_vcpu_uninit(vcpu);
830 }
831
832 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
833 {
834         return kvmppc_core_pending_dec(vcpu);
835 }
836
837 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
838 {
839 #ifdef CONFIG_BOOKE
840         /*
841          * vrsave (formerly usprg0) isn't used by Linux, but may
842          * be used by the guest.
843          *
844          * On non-booke this is associated with Altivec and
845          * is handled by code in book3s.c.
846          */
847         mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
848 #endif
849         kvmppc_core_vcpu_load(vcpu, cpu);
850 }
851
852 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
853 {
854         kvmppc_core_vcpu_put(vcpu);
855 #ifdef CONFIG_BOOKE
856         vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
857 #endif
858 }
859
860 /*
861  * irq_bypass_add_producer and irq_bypass_del_producer are only
862  * useful if the architecture supports PCI passthrough.
863  * irq_bypass_stop and irq_bypass_start are not needed and so
864  * kvm_ops are not defined for them.
865  */
866 bool kvm_arch_has_irq_bypass(void)
867 {
868         return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
869                 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
870 }
871
872 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
873                                      struct irq_bypass_producer *prod)
874 {
875         struct kvm_kernel_irqfd *irqfd =
876                 container_of(cons, struct kvm_kernel_irqfd, consumer);
877         struct kvm *kvm = irqfd->kvm;
878
879         if (kvm->arch.kvm_ops->irq_bypass_add_producer)
880                 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
881
882         return 0;
883 }
884
885 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
886                                       struct irq_bypass_producer *prod)
887 {
888         struct kvm_kernel_irqfd *irqfd =
889                 container_of(cons, struct kvm_kernel_irqfd, consumer);
890         struct kvm *kvm = irqfd->kvm;
891
892         if (kvm->arch.kvm_ops->irq_bypass_del_producer)
893                 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
894 }
895
896 #ifdef CONFIG_VSX
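/*
 * MMIO emulation accesses VSX registers one doubleword or word at a
 * time.  These helpers translate the element index used by the guest
 * instruction into the offset of that element within the host's
 * register image, which depends on host endianness.
 */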
897 static inline int kvmppc_get_vsr_dword_offset(int index)
898 {
899         int offset;
900
901         if ((index != 0) && (index != 1))
902                 return -1;
903
904 #ifdef __BIG_ENDIAN
905         offset =  index;
906 #else
907         offset = 1 - index;
908 #endif
909
910         return offset;
911 }
912
913 static inline int kvmppc_get_vsr_word_offset(int index)
914 {
915         int offset;
916
917         if ((index > 3) || (index < 0))
918                 return -1;
919
920 #ifdef __BIG_ENDIAN
921         offset = index;
922 #else
923         offset = 3 - index;
924 #endif
925         return offset;
926 }
927
928 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
929         u64 gpr)
930 {
931         union kvmppc_one_reg val;
932         int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
933         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
934
935         if (offset == -1)
936                 return;
937
938         if (index >= 32) {
939                 val.vval = VCPU_VSX_VR(vcpu, index - 32);
940                 val.vsxval[offset] = gpr;
941                 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
942         } else {
943                 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
944         }
945 }
946
947 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
948         u64 gpr)
949 {
950         union kvmppc_one_reg val;
951         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
952
953         if (index >= 32) {
954                 val.vval = VCPU_VSX_VR(vcpu, index - 32);
955                 val.vsxval[0] = gpr;
956                 val.vsxval[1] = gpr;
957                 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
958         } else {
959                 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
960                 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
961         }
962 }
963
964 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
965         u32 gpr)
966 {
967         union kvmppc_one_reg val;
968         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
969
970         if (index >= 32) {
971                 val.vsx32val[0] = gpr;
972                 val.vsx32val[1] = gpr;
973                 val.vsx32val[2] = gpr;
974                 val.vsx32val[3] = gpr;
975                 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
976         } else {
977                 val.vsx32val[0] = gpr;
978                 val.vsx32val[1] = gpr;
979                 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
980                 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
981         }
982 }
983
984 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
985         u32 gpr32)
986 {
987         union kvmppc_one_reg val;
988         int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
989         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
990         int dword_offset, word_offset;
991
992         if (offset == -1)
993                 return;
994
995         if (index >= 32) {
996                 val.vval = VCPU_VSX_VR(vcpu, index - 32);
997                 val.vsx32val[offset] = gpr32;
998                 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
999         } else {
1000                 dword_offset = offset / 2;
1001                 word_offset = offset % 2;
1002                 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
1003                 val.vsx32val[word_offset] = gpr32;
1004                 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
1005         }
1006 }
1007 #endif /* CONFIG_VSX */
1008
1009 #ifdef CONFIG_ALTIVEC
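/*
 * Same idea as the VSX helpers above, but for Altivec (VMX) registers:
 * map an element index to its offset within the host vector image,
 * taking the guest's byte order into account via kvmppc_need_byteswap().
 */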
1010 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
1011                 int index, int element_size)
1012 {
1013         int offset;
1014         int elts = sizeof(vector128)/element_size;
1015
1016         if ((index < 0) || (index >= elts))
1017                 return -1;
1018
1019         if (kvmppc_need_byteswap(vcpu))
1020                 offset = elts - index - 1;
1021         else
1022                 offset = index;
1023
1024         return offset;
1025 }
1026
1027 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1028                 int index)
1029 {
1030         return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1031 }
1032
1033 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1034                 int index)
1035 {
1036         return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1037 }
1038
1039 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1040                 int index)
1041 {
1042         return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1043 }
1044
1045 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1046                 int index)
1047 {
1048         return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1049 }
1050
1051
1052 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1053         u64 gpr)
1054 {
1055         union kvmppc_one_reg val;
1056         int offset = kvmppc_get_vmx_dword_offset(vcpu,
1057                         vcpu->arch.mmio_vmx_offset);
1058         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1059
1060         if (offset == -1)
1061                 return;
1062
1063         val.vval = VCPU_VSX_VR(vcpu, index);
1064         val.vsxval[offset] = gpr;
1065         VCPU_VSX_VR(vcpu, index) = val.vval;
1066 }
1067
1068 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1069         u32 gpr32)
1070 {
1071         union kvmppc_one_reg val;
1072         int offset = kvmppc_get_vmx_word_offset(vcpu,
1073                         vcpu->arch.mmio_vmx_offset);
1074         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1075
1076         if (offset == -1)
1077                 return;
1078
1079         val.vval = VCPU_VSX_VR(vcpu, index);
1080         val.vsx32val[offset] = gpr32;
1081         VCPU_VSX_VR(vcpu, index) = val.vval;
1082 }
1083
1084 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1085         u16 gpr16)
1086 {
1087         union kvmppc_one_reg val;
1088         int offset = kvmppc_get_vmx_hword_offset(vcpu,
1089                         vcpu->arch.mmio_vmx_offset);
1090         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1091
1092         if (offset == -1)
1093                 return;
1094
1095         val.vval = VCPU_VSX_VR(vcpu, index);
1096         val.vsx16val[offset] = gpr16;
1097         VCPU_VSX_VR(vcpu, index) = val.vval;
1098 }
1099
1100 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1101         u8 gpr8)
1102 {
1103         union kvmppc_one_reg val;
1104         int offset = kvmppc_get_vmx_byte_offset(vcpu,
1105                         vcpu->arch.mmio_vmx_offset);
1106         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1107
1108         if (offset == -1)
1109                 return;
1110
1111         val.vval = VCPU_VSX_VR(vcpu, index);
1112         val.vsx8val[offset] = gpr8;
1113         VCPU_VSX_VR(vcpu, index) = val.vval;
1114 }
1115 #endif /* CONFIG_ALTIVEC */
1116
1117 #ifdef CONFIG_PPC_FPU
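/*
 * Convert between single and double precision using the FPU's own
 * load/store conversion (lfs/stfd and lfd/stfs).  fr0 is clobbered, so
 * the FPU is grabbed with preemption disabled around the asm.
 */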
1118 static inline u64 sp_to_dp(u32 fprs)
1119 {
1120         u64 fprd;
1121
1122         preempt_disable();
1123         enable_kernel_fp();
1124         asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
1125              : "fr0");
1126         preempt_enable();
1127         return fprd;
1128 }
1129
1130 static inline u32 dp_to_sp(u64 fprd)
1131 {
1132         u32 fprs;
1133
1134         preempt_disable();
1135         enable_kernel_fp();
1136         asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
1137              : "fr0");
1138         preempt_enable();
1139         return fprs;
1140 }
1141
1142 #else
1143 #define sp_to_dp(x)     (x)
1144 #define dp_to_sp(x)     (x)
1145 #endif /* CONFIG_PPC_FPU */
1146
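/*
 * Finish an emulated MMIO load once the data is available in
 * run->mmio.data: byte-swap if required, apply single-precision and
 * sign-extension fixups, then write the result into the register
 * recorded in vcpu->arch.io_gpr.
 */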
1147 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1148 {
1149         struct kvm_run *run = vcpu->run;
1150         u64 gpr;
1151
1152         if (run->mmio.len > sizeof(gpr))
1153                 return;
1154
1155         if (!vcpu->arch.mmio_host_swabbed) {
1156                 switch (run->mmio.len) {
1157                 case 8: gpr = *(u64 *)run->mmio.data; break;
1158                 case 4: gpr = *(u32 *)run->mmio.data; break;
1159                 case 2: gpr = *(u16 *)run->mmio.data; break;
1160                 case 1: gpr = *(u8 *)run->mmio.data; break;
1161                 }
1162         } else {
1163                 switch (run->mmio.len) {
1164                 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1165                 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1166                 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1167                 case 1: gpr = *(u8 *)run->mmio.data; break;
1168                 }
1169         }
1170
1171         /* conversion between single and double precision */
1172         if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1173                 gpr = sp_to_dp(gpr);
1174
1175         if (vcpu->arch.mmio_sign_extend) {
1176                 switch (run->mmio.len) {
1177 #ifdef CONFIG_PPC64
1178                 case 4:
1179                         gpr = (s64)(s32)gpr;
1180                         break;
1181 #endif
1182                 case 2:
1183                         gpr = (s64)(s16)gpr;
1184                         break;
1185                 case 1:
1186                         gpr = (s64)(s8)gpr;
1187                         break;
1188                 }
1189         }
1190
1191         switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1192         case KVM_MMIO_REG_GPR:
1193                 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1194                 break;
1195         case KVM_MMIO_REG_FPR:
1196                 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1197                         vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1198
1199                 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1200                 break;
1201 #ifdef CONFIG_PPC_BOOK3S
1202         case KVM_MMIO_REG_QPR:
1203                 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1204                 break;
1205         case KVM_MMIO_REG_FQPR:
1206                 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1207                 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1208                 break;
1209 #endif
1210 #ifdef CONFIG_VSX
1211         case KVM_MMIO_REG_VSX:
1212                 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1213                         vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1214
1215                 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1216                         kvmppc_set_vsr_dword(vcpu, gpr);
1217                 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1218                         kvmppc_set_vsr_word(vcpu, gpr);
1219                 else if (vcpu->arch.mmio_copy_type ==
1220                                 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1221                         kvmppc_set_vsr_dword_dump(vcpu, gpr);
1222                 else if (vcpu->arch.mmio_copy_type ==
1223                                 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1224                         kvmppc_set_vsr_word_dump(vcpu, gpr);
1225                 break;
1226 #endif
1227 #ifdef CONFIG_ALTIVEC
1228         case KVM_MMIO_REG_VMX:
1229                 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1230                         vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1231
1232                 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1233                         kvmppc_set_vmx_dword(vcpu, gpr);
1234                 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1235                         kvmppc_set_vmx_word(vcpu, gpr);
1236                 else if (vcpu->arch.mmio_copy_type ==
1237                                 KVMPPC_VMX_COPY_HWORD)
1238                         kvmppc_set_vmx_hword(vcpu, gpr);
1239                 else if (vcpu->arch.mmio_copy_type ==
1240                                 KVMPPC_VMX_COPY_BYTE)
1241                         kvmppc_set_vmx_byte(vcpu, gpr);
1242                 break;
1243 #endif
1244 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1245         case KVM_MMIO_REG_NESTED_GPR:
1246                 if (kvmppc_need_byteswap(vcpu))
1247                         gpr = swab64(gpr);
1248                 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1249                                      sizeof(gpr));
1250                 break;
1251 #endif
1252         default:
1253                 BUG();
1254         }
1255 }
1256
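/*
 * Set up an emulated MMIO load.  If an in-kernel device on the MMIO bus
 * claims the access, the load completes immediately; otherwise
 * EMULATE_DO_MMIO is returned so userspace can service it, and the
 * result is picked up later by kvmppc_complete_mmio_load().
 */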
1257 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
1258                                 unsigned int rt, unsigned int bytes,
1259                                 int is_default_endian, int sign_extend)
1260 {
1261         struct kvm_run *run = vcpu->run;
1262         int idx, ret;
1263         bool host_swabbed;
1264
1265         /* Pity C doesn't have a logical XOR operator */
1266         if (kvmppc_need_byteswap(vcpu)) {
1267                 host_swabbed = is_default_endian;
1268         } else {
1269                 host_swabbed = !is_default_endian;
1270         }
1271
1272         if (bytes > sizeof(run->mmio.data))
1273                 return EMULATE_FAIL;
1274
1275         run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1276         run->mmio.len = bytes;
1277         run->mmio.is_write = 0;
1278
1279         vcpu->arch.io_gpr = rt;
1280         vcpu->arch.mmio_host_swabbed = host_swabbed;
1281         vcpu->mmio_needed = 1;
1282         vcpu->mmio_is_write = 0;
1283         vcpu->arch.mmio_sign_extend = sign_extend;
1284
1285         idx = srcu_read_lock(&vcpu->kvm->srcu);
1286
1287         ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1288                               bytes, &run->mmio.data);
1289
1290         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1291
1292         if (!ret) {
1293                 kvmppc_complete_mmio_load(vcpu);
1294                 vcpu->mmio_needed = 0;
1295                 return EMULATE_DONE;
1296         }
1297
1298         return EMULATE_DO_MMIO;
1299 }
1300
1301 int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1302                        unsigned int rt, unsigned int bytes,
1303                        int is_default_endian)
1304 {
1305         return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1306 }
1307 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1308
1309 /* Same as above, but sign extends */
1310 int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
1311                         unsigned int rt, unsigned int bytes,
1312                         int is_default_endian)
1313 {
1314         return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
1315 }
1316
1317 #ifdef CONFIG_VSX
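/*
 * A VSX load may need several MMIO transactions: mmio_vsx_copy_nums
 * counts the element copies still outstanding and mmio_vsx_offset
 * tracks which element the next transaction targets.
 */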
1318 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
1319                         unsigned int rt, unsigned int bytes,
1320                         int is_default_endian, int mmio_sign_extend)
1321 {
1322         enum emulation_result emulated = EMULATE_DONE;
1323
1324         /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1325         if (vcpu->arch.mmio_vsx_copy_nums > 4)
1326                 return EMULATE_FAIL;
1327
1328         while (vcpu->arch.mmio_vsx_copy_nums) {
1329                 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1330                         is_default_endian, mmio_sign_extend);
1331
1332                 if (emulated != EMULATE_DONE)
1333                         break;
1334
1335                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1336
1337                 vcpu->arch.mmio_vsx_copy_nums--;
1338                 vcpu->arch.mmio_vsx_offset++;
1339         }
1340         return emulated;
1341 }
1342 #endif /* CONFIG_VSX */
1343
1344 int kvmppc_handle_store(struct kvm_vcpu *vcpu,
1345                         u64 val, unsigned int bytes, int is_default_endian)
1346 {
1347         struct kvm_run *run = vcpu->run;
1348         void *data = run->mmio.data;
1349         int idx, ret;
1350         bool host_swabbed;
1351
1352         /* Pity C doesn't have a logical XOR operator */
1353         if (kvmppc_need_byteswap(vcpu)) {
1354                 host_swabbed = is_default_endian;
1355         } else {
1356                 host_swabbed = !is_default_endian;
1357         }
1358
1359         if (bytes > sizeof(run->mmio.data))
1360                 return EMULATE_FAIL;
1361
1362         run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1363         run->mmio.len = bytes;
1364         run->mmio.is_write = 1;
1365         vcpu->mmio_needed = 1;
1366         vcpu->mmio_is_write = 1;
1367
1368         if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1369                 val = dp_to_sp(val);
1370
1371         /* Store the value in the lowest bytes of 'data'. */
1372         if (!host_swabbed) {
1373                 switch (bytes) {
1374                 case 8: *(u64 *)data = val; break;
1375                 case 4: *(u32 *)data = val; break;
1376                 case 2: *(u16 *)data = val; break;
1377                 case 1: *(u8  *)data = val; break;
1378                 }
1379         } else {
1380                 switch (bytes) {
1381                 case 8: *(u64 *)data = swab64(val); break;
1382                 case 4: *(u32 *)data = swab32(val); break;
1383                 case 2: *(u16 *)data = swab16(val); break;
1384                 case 1: *(u8  *)data = val; break;
1385                 }
1386         }
1387
1388         idx = srcu_read_lock(&vcpu->kvm->srcu);
1389
1390         ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1391                                bytes, &run->mmio.data);
1392
1393         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1394
1395         if (!ret) {
1396                 vcpu->mmio_needed = 0;
1397                 return EMULATE_DONE;
1398         }
1399
1400         return EMULATE_DO_MMIO;
1401 }
1402 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1403
1404 #ifdef CONFIG_VSX
1405 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1406 {
1407         u32 dword_offset, word_offset;
1408         union kvmppc_one_reg reg;
1409         int vsx_offset = 0;
1410         int copy_type = vcpu->arch.mmio_copy_type;
1411         int result = 0;
1412
1413         switch (copy_type) {
1414         case KVMPPC_VSX_COPY_DWORD:
1415                 vsx_offset =
1416                         kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1417
1418                 if (vsx_offset == -1) {
1419                         result = -1;
1420                         break;
1421                 }
1422
1423                 if (rs < 32) {
1424                         *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1425                 } else {
1426                         reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1427                         *val = reg.vsxval[vsx_offset];
1428                 }
1429                 break;
1430
1431         case KVMPPC_VSX_COPY_WORD:
1432                 vsx_offset =
1433                         kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1434
1435                 if (vsx_offset == -1) {
1436                         result = -1;
1437                         break;
1438                 }
1439
1440                 if (rs < 32) {
1441                         dword_offset = vsx_offset / 2;
1442                         word_offset = vsx_offset % 2;
1443                         reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1444                         *val = reg.vsx32val[word_offset];
1445                 } else {
1446                         reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1447                         *val = reg.vsx32val[vsx_offset];
1448                 }
1449                 break;
1450
1451         default:
1452                 result = -1;
1453                 break;
1454         }
1455
1456         return result;
1457 }
1458
1459 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
1460                         int rs, unsigned int bytes, int is_default_endian)
1461 {
1462         u64 val;
1463         enum emulation_result emulated = EMULATE_DONE;
1464
1465         vcpu->arch.io_gpr = rs;
1466
1467         /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1468         if (vcpu->arch.mmio_vsx_copy_nums > 4)
1469                 return EMULATE_FAIL;
1470
1471         while (vcpu->arch.mmio_vsx_copy_nums) {
1472                 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1473                         return EMULATE_FAIL;
1474
1475                 emulated = kvmppc_handle_store(vcpu,
1476                          val, bytes, is_default_endian);
1477
1478                 if (emulated != EMULATE_DONE)
1479                         break;
1480
1481                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1482
1483                 vcpu->arch.mmio_vsx_copy_nums--;
1484                 vcpu->arch.mmio_vsx_offset++;
1485         }
1486
1487         return emulated;
1488 }
1489
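/*
 * Called to continue a multi-element VSX MMIO access after one element
 * completes: advance the guest physical address and either issue the
 * next element or resume the guest when none remain.
 */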
1490 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1491 {
1492         struct kvm_run *run = vcpu->run;
1493         enum emulation_result emulated = EMULATE_FAIL;
1494         int r;
1495
1496         vcpu->arch.paddr_accessed += run->mmio.len;
1497
1498         if (!vcpu->mmio_is_write) {
1499                 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1500                          run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1501         } else {
1502                 emulated = kvmppc_handle_vsx_store(vcpu,
1503                          vcpu->arch.io_gpr, run->mmio.len, 1);
1504         }
1505
1506         switch (emulated) {
1507         case EMULATE_DO_MMIO:
1508                 run->exit_reason = KVM_EXIT_MMIO;
1509                 r = RESUME_HOST;
1510                 break;
1511         case EMULATE_FAIL:
1512                 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1513                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1514                 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1515                 r = RESUME_HOST;
1516                 break;
1517         default:
1518                 r = RESUME_GUEST;
1519                 break;
1520         }
1521         return r;
1522 }
1523 #endif /* CONFIG_VSX */
1524
1525 #ifdef CONFIG_ALTIVEC
1526 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1527                 unsigned int rt, unsigned int bytes, int is_default_endian)
1528 {
1529         enum emulation_result emulated = EMULATE_DONE;
1530
1531         if (vcpu->arch.mmio_vmx_copy_nums > 2)
1532                 return EMULATE_FAIL;
1533
1534         while (vcpu->arch.mmio_vmx_copy_nums) {
1535                 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1536                                 is_default_endian, 0);
1537
1538                 if (emulated != EMULATE_DONE)
1539                         break;
1540
1541                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1542                 vcpu->arch.mmio_vmx_copy_nums--;
1543                 vcpu->arch.mmio_vmx_offset++;
1544         }
1545
1546         return emulated;
1547 }
1548
1549 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1550 {
1551         union kvmppc_one_reg reg;
1552         int vmx_offset = 0;
1553         int result = 0;
1554
1555         vmx_offset =
1556                 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1557
1558         if (vmx_offset == -1)
1559                 return -1;
1560
1561         reg.vval = VCPU_VSX_VR(vcpu, index);
1562         *val = reg.vsxval[vmx_offset];
1563
1564         return result;
1565 }
1566
1567 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1568 {
1569         union kvmppc_one_reg reg;
1570         int vmx_offset = 0;
1571         int result = 0;
1572
1573         vmx_offset =
1574                 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1575
1576         if (vmx_offset == -1)
1577                 return -1;
1578
1579         reg.vval = VCPU_VSX_VR(vcpu, index);
1580         *val = reg.vsx32val[vmx_offset];
1581
1582         return result;
1583 }
1584
1585 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1586 {
1587         union kvmppc_one_reg reg;
1588         int vmx_offset = 0;
1589         int result = 0;
1590
1591         vmx_offset =
1592                 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1593
1594         if (vmx_offset == -1)
1595                 return -1;
1596
1597         reg.vval = VCPU_VSX_VR(vcpu, index);
1598         *val = reg.vsx16val[vmx_offset];
1599
1600         return result;
1601 }
1602
1603 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1604 {
1605         union kvmppc_one_reg reg;
1606         int vmx_offset = 0;
1607         int result = 0;
1608
1609         vmx_offset =
1610                 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1611
1612         if (vmx_offset == -1)
1613                 return -1;
1614
1615         reg.vval = VCPU_VSX_VR(vcpu, index);
1616         *val = reg.vsx8val[vmx_offset];
1617
1618         return result;
1619 }
1620
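     /*
      * Emulate a VMX (Altivec) store that faulted on an MMIO address: pull
      * each element out of the source vector register according to
      * mmio_copy_type and hand it to kvmppc_handle_store(), advancing the
      * guest physical address and element offset after each store.
      */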
1621 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1622                 unsigned int rs, unsigned int bytes, int is_default_endian)
1623 {
1624         u64 val = 0;
1625         unsigned int index = rs & KVM_MMIO_REG_MASK;
1626         enum emulation_result emulated = EMULATE_DONE;
1627
1628         if (vcpu->arch.mmio_vmx_copy_nums > 2)
1629                 return EMULATE_FAIL;
1630
1631         vcpu->arch.io_gpr = rs;
1632
1633         while (vcpu->arch.mmio_vmx_copy_nums) {
1634                 switch (vcpu->arch.mmio_copy_type) {
1635                 case KVMPPC_VMX_COPY_DWORD:
1636                         if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1637                                 return EMULATE_FAIL;
1639                         break;
1640                 case KVMPPC_VMX_COPY_WORD:
1641                         if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1642                                 return EMULATE_FAIL;
1643                         break;
1644                 case KVMPPC_VMX_COPY_HWORD:
1645                         if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1646                                 return EMULATE_FAIL;
1647                         break;
1648                 case KVMPPC_VMX_COPY_BYTE:
1649                         if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1650                                 return EMULATE_FAIL;
1651                         break;
1652                 default:
1653                         return EMULATE_FAIL;
1654                 }
1655
1656                 emulated = kvmppc_handle_store(vcpu, val, bytes,
1657                                 is_default_endian);
1658                 if (emulated != EMULATE_DONE)
1659                         break;
1660
1661                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1662                 vcpu->arch.mmio_vmx_copy_nums--;
1663                 vcpu->arch.mmio_vmx_offset++;
1664         }
1665
1666         return emulated;
1667 }
1668
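     /*
      * Continue a multi-element VMX MMIO access after the previous element
      * completed in userspace: step past the bytes just handled and
      * re-issue the load or store for the next element.
      */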
1669 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1670 {
1671         struct kvm_run *run = vcpu->run;
1672         enum emulation_result emulated = EMULATE_FAIL;
1673         int r;
1674
1675         vcpu->arch.paddr_accessed += run->mmio.len;
1676
1677         if (!vcpu->mmio_is_write) {
1678                 emulated = kvmppc_handle_vmx_load(vcpu,
1679                                 vcpu->arch.io_gpr, run->mmio.len, 1);
1680         } else {
1681                 emulated = kvmppc_handle_vmx_store(vcpu,
1682                                 vcpu->arch.io_gpr, run->mmio.len, 1);
1683         }
1684
1685         switch (emulated) {
1686         case EMULATE_DO_MMIO:
1687                 run->exit_reason = KVM_EXIT_MMIO;
1688                 r = RESUME_HOST;
1689                 break;
1690         case EMULATE_FAIL:
1691                 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1692                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1693                 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1694                 r = RESUME_HOST;
1695                 break;
1696         default:
1697                 r = RESUME_GUEST;
1698                 break;
1699         }
1700         return r;
1701 }
1702 #endif /* CONFIG_ALTIVEC */
1703
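     /*
      * KVM_GET_ONE_REG: registers that kvmppc_get_one_reg() does not claim
      * (it returns -EINVAL) fall back to the Altivec cases below before the
      * value is copied out to userspace.
      */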
1704 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1705 {
1706         int r = 0;
1707         union kvmppc_one_reg val;
1708         int size;
1709
1710         size = one_reg_size(reg->id);
1711         if (size > sizeof(val))
1712                 return -EINVAL;
1713
1714         r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1715         if (r == -EINVAL) {
1716                 r = 0;
1717                 switch (reg->id) {
1718 #ifdef CONFIG_ALTIVEC
1719                 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1720                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1721                                 r = -ENXIO;
1722                                 break;
1723                         }
1724                         val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1725                         break;
1726                 case KVM_REG_PPC_VSCR:
1727                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1728                                 r = -ENXIO;
1729                                 break;
1730                         }
1731                         val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1732                         break;
1733                 case KVM_REG_PPC_VRSAVE:
1734                         val = get_reg_val(reg->id, vcpu->arch.vrsave);
1735                         break;
1736 #endif /* CONFIG_ALTIVEC */
1737                 default:
1738                         r = -EINVAL;
1739                         break;
1740                 }
1741         }
1742
1743         if (r)
1744                 return r;
1745
1746         if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1747                 r = -EFAULT;
1748
1749         return r;
1750 }
1751
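     /*
      * KVM_SET_ONE_REG: the new value is copied in from userspace first;
      * registers that kvmppc_set_one_reg() does not claim (it returns
      * -EINVAL) fall back to the Altivec cases below.
      */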
1752 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1753 {
1754         int r;
1755         union kvmppc_one_reg val;
1756         int size;
1757
1758         size = one_reg_size(reg->id);
1759         if (size > sizeof(val))
1760                 return -EINVAL;
1761
1762         if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1763                 return -EFAULT;
1764
1765         r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1766         if (r == -EINVAL) {
1767                 r = 0;
1768                 switch (reg->id) {
1769 #ifdef CONFIG_ALTIVEC
1770                 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1771                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1772                                 r = -ENXIO;
1773                                 break;
1774                         }
1775                         vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1776                         break;
1777                 case KVM_REG_PPC_VSCR:
1778                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1779                                 r = -ENXIO;
1780                                 break;
1781                         }
1782                         vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1783                         break;
1784                 case KVM_REG_PPC_VRSAVE:
1785                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1786                                 r = -ENXIO;
1787                                 break;
1788                         }
1789                         vcpu->arch.vrsave = set_reg_val(reg->id, val);
1790                         break;
1791 #endif /* CONFIG_ALTIVEC */
1792                 default:
1793                         r = -EINVAL;
1794                         break;
1795                 }
1796         }
1797
1798         return r;
1799 }
1800
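     /*
      * KVM_RUN: first finish whatever the previous exit left pending (MMIO
      * data to load into registers, OSI or papr hcall return values, an
      * EPR value), then enter the guest unless userspace asked for an
      * immediate exit.
      */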
1801 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1802 {
1803         struct kvm_run *run = vcpu->run;
1804         int r;
1805
1806         vcpu_load(vcpu);
1807
1808         if (vcpu->mmio_needed) {
1809                 vcpu->mmio_needed = 0;
1810                 if (!vcpu->mmio_is_write)
1811                         kvmppc_complete_mmio_load(vcpu);
1812 #ifdef CONFIG_VSX
1813                 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1814                         vcpu->arch.mmio_vsx_copy_nums--;
1815                         vcpu->arch.mmio_vsx_offset++;
1816                 }
1817
1818                 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1819                         r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1820                         if (r == RESUME_HOST) {
1821                                 vcpu->mmio_needed = 1;
1822                                 goto out;
1823                         }
1824                 }
1825 #endif
1826 #ifdef CONFIG_ALTIVEC
1827                 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1828                         vcpu->arch.mmio_vmx_copy_nums--;
1829                         vcpu->arch.mmio_vmx_offset++;
1830                 }
1831
1832                 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1833                         r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1834                         if (r == RESUME_HOST) {
1835                                 vcpu->mmio_needed = 1;
1836                                 goto out;
1837                         }
1838                 }
1839 #endif
1840         } else if (vcpu->arch.osi_needed) {
1841                 u64 *gprs = run->osi.gprs;
1842                 int i;
1843
1844                 for (i = 0; i < 32; i++)
1845                         kvmppc_set_gpr(vcpu, i, gprs[i]);
1846                 vcpu->arch.osi_needed = 0;
1847         } else if (vcpu->arch.hcall_needed) {
1848                 int i;
1849
1850                 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1851                 for (i = 0; i < 9; ++i)
1852                         kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1853                 vcpu->arch.hcall_needed = 0;
1854 #ifdef CONFIG_BOOKE
1855         } else if (vcpu->arch.epr_needed) {
1856                 kvmppc_set_epr(vcpu, run->epr.epr);
1857                 vcpu->arch.epr_needed = 0;
1858 #endif
1859         }
1860
1861         kvm_sigset_activate(vcpu);
1862
1863         if (run->immediate_exit)
1864                 r = -EINTR;
1865         else
1866                 r = kvmppc_vcpu_run(vcpu);
1867
1868         kvm_sigset_deactivate(vcpu);
1869
1870 #ifdef CONFIG_ALTIVEC
1871 out:
1872 #endif
1873
1874         /*
1875          * We're already returning to userspace, don't pass the
1876          * RESUME_HOST flags along.
1877          */
1878         if (r > 0)
1879                 r = 0;
1880
1881         vcpu_put(vcpu);
1882         return r;
1883 }
1884
1885 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1886 {
1887         if (irq->irq == KVM_INTERRUPT_UNSET) {
1888                 kvmppc_core_dequeue_external(vcpu);
1889                 return 0;
1890         }
1891
1892         kvmppc_core_queue_external(vcpu, irq);
1893
1894         kvm_vcpu_kick(vcpu);
1895
1896         return 0;
1897 }
1898
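     /*
      * Per-vcpu KVM_ENABLE_CAP.  Most capabilities just set a flag on the
      * vcpu; the in-kernel interrupt controller ones (MPIC, XICS, XIVE)
      * take a device fd in args[0] and connect this vcpu to that device.
      */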
1899 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1900                                      struct kvm_enable_cap *cap)
1901 {
1902         int r;
1903
1904         if (cap->flags)
1905                 return -EINVAL;
1906
1907         switch (cap->cap) {
1908         case KVM_CAP_PPC_OSI:
1909                 r = 0;
1910                 vcpu->arch.osi_enabled = true;
1911                 break;
1912         case KVM_CAP_PPC_PAPR:
1913                 r = 0;
1914                 vcpu->arch.papr_enabled = true;
1915                 break;
1916         case KVM_CAP_PPC_EPR:
1917                 r = 0;
1918                 if (cap->args[0])
1919                         vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1920                 else
1921                         vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1922                 break;
1923 #ifdef CONFIG_BOOKE
1924         case KVM_CAP_PPC_BOOKE_WATCHDOG:
1925                 r = 0;
1926                 vcpu->arch.watchdog_enabled = true;
1927                 break;
1928 #endif
1929 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1930         case KVM_CAP_SW_TLB: {
1931                 struct kvm_config_tlb cfg;
1932                 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1933
1934                 r = -EFAULT;
1935                 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1936                         break;
1937
1938                 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1939                 break;
1940         }
1941 #endif
1942 #ifdef CONFIG_KVM_MPIC
1943         case KVM_CAP_IRQ_MPIC: {
1944                 struct fd f;
1945                 struct kvm_device *dev;
1946
1947                 r = -EBADF;
1948                 f = fdget(cap->args[0]);
1949                 if (!f.file)
1950                         break;
1951
1952                 r = -EPERM;
1953                 dev = kvm_device_from_filp(f.file);
1954                 if (dev)
1955                         r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1956
1957                 fdput(f);
1958                 break;
1959         }
1960 #endif
1961 #ifdef CONFIG_KVM_XICS
1962         case KVM_CAP_IRQ_XICS: {
1963                 struct fd f;
1964                 struct kvm_device *dev;
1965
1966                 r = -EBADF;
1967                 f = fdget(cap->args[0]);
1968                 if (!f.file)
1969                         break;
1970
1971                 r = -EPERM;
1972                 dev = kvm_device_from_filp(f.file);
1973                 if (dev) {
1974                         if (xics_on_xive())
1975                                 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1976                         else
1977                                 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1978                 }
1979
1980                 fdput(f);
1981                 break;
1982         }
1983 #endif /* CONFIG_KVM_XICS */
1984 #ifdef CONFIG_KVM_XIVE
1985         case KVM_CAP_PPC_IRQ_XIVE: {
1986                 struct fd f;
1987                 struct kvm_device *dev;
1988
1989                 r = -EBADF;
1990                 f = fdget(cap->args[0]);
1991                 if (!f.file)
1992                         break;
1993
1994                 r = -ENXIO;
1995                 if (!xive_enabled())
1996                         break;
1997
1998                 r = -EPERM;
1999                 dev = kvm_device_from_filp(f.file);
2000                 if (dev)
2001                         r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
2002                                                             cap->args[1]);
2003
2004                 fdput(f);
2005                 break;
2006         }
2007 #endif /* CONFIG_KVM_XIVE */
2008 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2009         case KVM_CAP_PPC_FWNMI:
2010                 r = -EINVAL;
2011                 if (!is_kvmppc_hv_enabled(vcpu->kvm))
2012                         break;
2013                 r = 0;
2014                 vcpu->kvm->arch.fwnmi_enabled = true;
2015                 break;
2016 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2017         default:
2018                 r = -EINVAL;
2019                 break;
2020         }
2021
2022         if (!r)
2023                 r = kvmppc_sanity_check(vcpu);
2024
2025         return r;
2026 }
2027
2028 bool kvm_arch_intc_initialized(struct kvm *kvm)
2029 {
2030 #ifdef CONFIG_KVM_MPIC
2031         if (kvm->arch.mpic)
2032                 return true;
2033 #endif
2034 #ifdef CONFIG_KVM_XICS
2035         if (kvm->arch.xics || kvm->arch.xive)
2036                 return true;
2037 #endif
2038         return false;
2039 }
2040
2041 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2042                                     struct kvm_mp_state *mp_state)
2043 {
2044         return -EINVAL;
2045 }
2046
2047 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2048                                     struct kvm_mp_state *mp_state)
2049 {
2050         return -EINVAL;
2051 }
2052
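     /*
      * Async vcpu ioctl path: only KVM_INTERRUPT is handled here, so that
      * interrupt injection can be requested without waiting on the vcpu
      * mutex held across KVM_RUN.
      */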
2053 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2054                                unsigned int ioctl, unsigned long arg)
2055 {
2056         struct kvm_vcpu *vcpu = filp->private_data;
2057         void __user *argp = (void __user *)arg;
2058
2059         if (ioctl == KVM_INTERRUPT) {
2060                 struct kvm_interrupt irq;
2061                 if (copy_from_user(&irq, argp, sizeof(irq)))
2062                         return -EFAULT;
2063                 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2064         }
2065         return -ENOIOCTLCMD;
2066 }
2067
2068 long kvm_arch_vcpu_ioctl(struct file *filp,
2069                          unsigned int ioctl, unsigned long arg)
2070 {
2071         struct kvm_vcpu *vcpu = filp->private_data;
2072         void __user *argp = (void __user *)arg;
2073         long r;
2074
2075         switch (ioctl) {
2076         case KVM_ENABLE_CAP:
2077         {
2078                 struct kvm_enable_cap cap;
2079                 r = -EFAULT;
2080                 if (copy_from_user(&cap, argp, sizeof(cap)))
2081                         goto out;
2082                 vcpu_load(vcpu);
2083                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2084                 vcpu_put(vcpu);
2085                 break;
2086         }
2087
2088         case KVM_SET_ONE_REG:
2089         case KVM_GET_ONE_REG:
2090         {
2091                 struct kvm_one_reg reg;
2092                 r = -EFAULT;
2093                 if (copy_from_user(&reg, argp, sizeof(reg)))
2094                         goto out;
2095                 if (ioctl == KVM_SET_ONE_REG)
2096                         r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2097                 else
2098                         r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2099                 break;
2100         }
2101
2102 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2103         case KVM_DIRTY_TLB: {
2104                 struct kvm_dirty_tlb dirty;
2105                 r = -EFAULT;
2106                 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2107                         goto out;
2108                 vcpu_load(vcpu);
2109                 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2110                 vcpu_put(vcpu);
2111                 break;
2112         }
2113 #endif
2114         default:
2115                 r = -EINVAL;
2116         }
2117
2118 out:
2119         return r;
2120 }
2121
2122 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2123 {
2124         return VM_FAULT_SIGBUS;
2125 }
2126
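     /*
      * KVM_PPC_GET_PVINFO: report the four-instruction sequence a guest
      * should use to make a KVM paravirt hypercall (sc 1 on booke HV,
      * otherwise the magic-r0 lis/ori/sc/nop sequence built below).
      */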
2127 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2128 {
2129         u32 inst_nop = 0x60000000;
2130 #ifdef CONFIG_KVM_BOOKE_HV
2131         u32 inst_sc1 = 0x44000022;
2132         pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2133         pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2134         pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2135         pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2136 #else
2137         u32 inst_lis = 0x3c000000;
2138         u32 inst_ori = 0x60000000;
2139         u32 inst_sc = 0x44000002;
2140         u32 inst_imm_mask = 0xffff;
2141
2142         /*
2143          * The hypercall to get into KVM from within guest context is as
2144          * follows:
2145          *
2146          *    lis r0, KVM_SC_MAGIC_R0@h
2147          *    ori r0, r0, KVM_SC_MAGIC_R0@l
2148          *    sc
2149          *    nop
2150          */
2151         pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2152         pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2153         pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2154         pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2155 #endif
2156
2157         pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2158
2159         return 0;
2160 }
2161
2162 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2163 {
2164         int ret = 0;
2165
2166 #ifdef CONFIG_KVM_MPIC
2167         ret = ret || (kvm->arch.mpic != NULL);
2168 #endif
2169 #ifdef CONFIG_KVM_XICS
2170         ret = ret || (kvm->arch.xics != NULL);
2171         ret = ret || (kvm->arch.xive != NULL);
2172 #endif
2173         smp_rmb();
2174         return ret;
2175 }
2176
2177 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2178                           bool line_status)
2179 {
2180         if (!kvm_arch_irqchip_in_kernel(kvm))
2181                 return -ENXIO;
2182
2183         irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2184                                         irq_event->irq, irq_event->level,
2185                                         line_status);
2186         return 0;
2187 }
2188
2189
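     /*
      * VM-wide KVM_ENABLE_CAP.  Hypercall enablement flips bits in
      * enabled_hcalls directly; SMT mode, nested HV, secure guest and DAWR1
      * support are forwarded to the backend through kvm_ops.
      */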
2190 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2191                             struct kvm_enable_cap *cap)
2192 {
2193         int r;
2194
2195         if (cap->flags)
2196                 return -EINVAL;
2197
2198         switch (cap->cap) {
2199 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2200         case KVM_CAP_PPC_ENABLE_HCALL: {
2201                 unsigned long hcall = cap->args[0];
2202
2203                 r = -EINVAL;
2204                 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2205                     cap->args[1] > 1)
2206                         break;
2207                 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2208                         break;
2209                 if (cap->args[1])
2210                         set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2211                 else
2212                         clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2213                 r = 0;
2214                 break;
2215         }
2216         case KVM_CAP_PPC_SMT: {
2217                 unsigned long mode = cap->args[0];
2218                 unsigned long flags = cap->args[1];
2219
2220                 r = -EINVAL;
2221                 if (kvm->arch.kvm_ops->set_smt_mode)
2222                         r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2223                 break;
2224         }
2225
2226         case KVM_CAP_PPC_NESTED_HV:
2227                 r = -EINVAL;
2228                 if (!is_kvmppc_hv_enabled(kvm) ||
2229                     !kvm->arch.kvm_ops->enable_nested)
2230                         break;
2231                 r = kvm->arch.kvm_ops->enable_nested(kvm);
2232                 break;
2233 #endif
2234 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2235         case KVM_CAP_PPC_SECURE_GUEST:
2236                 r = -EINVAL;
2237                 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2238                         break;
2239                 r = kvm->arch.kvm_ops->enable_svm(kvm);
2240                 break;
2241         case KVM_CAP_PPC_DAWR1:
2242                 r = -EINVAL;
2243                 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2244                         break;
2245                 r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2246                 break;
2247 #endif
2248         default:
2249                 r = -EINVAL;
2250                 break;
2251         }
2252
2253         return r;
2254 }
2255
2256 #ifdef CONFIG_PPC_BOOK3S_64
2257 /*
2258  * These functions check whether the underlying hardware is safe
2259  * against attacks based on observing the effects of speculatively
2260  * executed instructions, and whether it supplies instructions for
2261  * use in workarounds.  The information comes from firmware, either
2262  * via the device tree on powernv platforms or from an hcall on
2263  * pseries platforms.
2264  */
2265 #ifdef CONFIG_PPC_PSERIES
2266 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2267 {
2268         struct h_cpu_char_result c;
2269         unsigned long rc;
2270
2271         if (!machine_is(pseries))
2272                 return -ENOTTY;
2273
2274         rc = plpar_get_cpu_characteristics(&c);
2275         if (rc == H_SUCCESS) {
2276                 cp->character = c.character;
2277                 cp->behaviour = c.behaviour;
2278                 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2279                         KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2280                         KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2281                         KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2282                         KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2283                         KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2284                         KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2285                         KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2286                         KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2287                 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2288                         KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2289                         KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2290                         KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2291         }
2292         return 0;
2293 }
2294 #else
2295 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2296 {
2297         return -ENOTTY;
2298 }
2299 #endif
2300
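     /*
      * Look up the child node 'name' under the fw-features node and report
      * whether it carries the boolean property 'state' ("enabled" or
      * "disabled").
      */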
2301 static inline bool have_fw_feat(struct device_node *fw_features,
2302                                 const char *state, const char *name)
2303 {
2304         struct device_node *np;
2305         bool r = false;
2306
2307         np = of_get_child_by_name(fw_features, name);
2308         if (np) {
2309                 r = of_property_read_bool(np, state);
2310                 of_node_put(np);
2311         }
2312         return r;
2313 }
2314
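     /*
      * Gather CPU characteristics for KVM_PPC_GET_CPU_CHAR: try the pseries
      * hcall via plpar_get_cpu_characteristics() first, and otherwise read
      * the fw-features nodes that OPAL exposes in the device tree on
      * powernv.
      */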
2315 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2316 {
2317         struct device_node *np, *fw_features;
2318         int r;
2319
2320         memset(cp, 0, sizeof(*cp));
2321         r = pseries_get_cpu_char(cp);
2322         if (r != -ENOTTY)
2323                 return r;
2324
2325         np = of_find_node_by_name(NULL, "ibm,opal");
2326         if (np) {
2327                 fw_features = of_get_child_by_name(np, "fw-features");
2328                 of_node_put(np);
2329                 if (!fw_features)
2330                         return 0;
2331                 if (have_fw_feat(fw_features, "enabled",
2332                                  "inst-spec-barrier-ori31,31,0"))
2333                         cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2334                 if (have_fw_feat(fw_features, "enabled",
2335                                  "fw-bcctrl-serialized"))
2336                         cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2337                 if (have_fw_feat(fw_features, "enabled",
2338                                  "inst-l1d-flush-ori30,30,0"))
2339                         cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2340                 if (have_fw_feat(fw_features, "enabled",
2341                                  "inst-l1d-flush-trig2"))
2342                         cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2343                 if (have_fw_feat(fw_features, "enabled",
2344                                  "fw-l1d-thread-split"))
2345                         cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2346                 if (have_fw_feat(fw_features, "enabled",
2347                                  "fw-count-cache-disabled"))
2348                         cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2349                 if (have_fw_feat(fw_features, "enabled",
2350                                  "fw-count-cache-flush-bcctr2,0,0"))
2351                         cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2352                 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2353                         KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2354                         KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2355                         KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2356                         KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2357                         KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2358                         KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2359
2360                 if (have_fw_feat(fw_features, "enabled",
2361                                  "speculation-policy-favor-security"))
2362                         cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2363                 if (!have_fw_feat(fw_features, "disabled",
2364                                   "needs-l1d-flush-msr-pr-0-to-1"))
2365                         cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2366                 if (!have_fw_feat(fw_features, "disabled",
2367                                   "needs-spec-barrier-for-bound-checks"))
2368                         cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2369                 if (have_fw_feat(fw_features, "enabled",
2370                                  "needs-count-cache-flush-on-context-switch"))
2371                         cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2372                 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2373                         KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2374                         KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2375                         KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2376
2377                 of_node_put(fw_features);
2378         }
2379
2380         return 0;
2381 }
2382 #endif
2383
2384 long kvm_arch_vm_ioctl(struct file *filp,
2385                        unsigned int ioctl, unsigned long arg)
2386 {
2387         struct kvm *kvm __maybe_unused = filp->private_data;
2388         void __user *argp = (void __user *)arg;
2389         long r;
2390
2391         switch (ioctl) {
2392         case KVM_PPC_GET_PVINFO: {
2393                 struct kvm_ppc_pvinfo pvinfo;
2394                 memset(&pvinfo, 0, sizeof(pvinfo));
2395                 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2396                 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2397                         r = -EFAULT;
2398                         goto out;
2399                 }
2400
2401                 break;
2402         }
2403 #ifdef CONFIG_SPAPR_TCE_IOMMU
2404         case KVM_CREATE_SPAPR_TCE_64: {
2405                 struct kvm_create_spapr_tce_64 create_tce_64;
2406
2407                 r = -EFAULT;
2408                 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2409                         goto out;
2410                 if (create_tce_64.flags) {
2411                         r = -EINVAL;
2412                         goto out;
2413                 }
2414                 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2415                 goto out;
2416         }
2417         case KVM_CREATE_SPAPR_TCE: {
2418                 struct kvm_create_spapr_tce create_tce;
2419                 struct kvm_create_spapr_tce_64 create_tce_64;
2420
2421                 r = -EFAULT;
2422                 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2423                         goto out;
2424
2425                 create_tce_64.liobn = create_tce.liobn;
2426                 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2427                 create_tce_64.offset = 0;
2428                 create_tce_64.size = create_tce.window_size >>
2429                                 IOMMU_PAGE_SHIFT_4K;
2430                 create_tce_64.flags = 0;
2431                 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2432                 goto out;
2433         }
2434 #endif
2435 #ifdef CONFIG_PPC_BOOK3S_64
2436         case KVM_PPC_GET_SMMU_INFO: {
2437                 struct kvm_ppc_smmu_info info;
2438                 struct kvm *kvm = filp->private_data;
2439
2440                 memset(&info, 0, sizeof(info));
2441                 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2442                 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2443                         r = -EFAULT;
2444                 break;
2445         }
2446         case KVM_PPC_RTAS_DEFINE_TOKEN: {
2447                 struct kvm *kvm = filp->private_data;
2448
2449                 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2450                 break;
2451         }
2452         case KVM_PPC_CONFIGURE_V3_MMU: {
2453                 struct kvm *kvm = filp->private_data;
2454                 struct kvm_ppc_mmuv3_cfg cfg;
2455
2456                 r = -EINVAL;
2457                 if (!kvm->arch.kvm_ops->configure_mmu)
2458                         goto out;
2459                 r = -EFAULT;
2460                 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2461                         goto out;
2462                 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2463                 break;
2464         }
2465         case KVM_PPC_GET_RMMU_INFO: {
2466                 struct kvm *kvm = filp->private_data;
2467                 struct kvm_ppc_rmmu_info info;
2468
2469                 r = -EINVAL;
2470                 if (!kvm->arch.kvm_ops->get_rmmu_info)
2471                         goto out;
2472                 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2473                 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2474                         r = -EFAULT;
2475                 break;
2476         }
2477         case KVM_PPC_GET_CPU_CHAR: {
2478                 struct kvm_ppc_cpu_char cpuchar;
2479
2480                 r = kvmppc_get_cpu_char(&cpuchar);
2481                 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2482                         r = -EFAULT;
2483                 break;
2484         }
2485         case KVM_PPC_SVM_OFF: {
2486                 struct kvm *kvm = filp->private_data;
2487
2488                 r = 0;
2489                 if (!kvm->arch.kvm_ops->svm_off)
2490                         goto out;
2491
2492                 r = kvm->arch.kvm_ops->svm_off(kvm);
2493                 break;
2494         }
2495         default: {
2496                 struct kvm *kvm = filp->private_data;
2497                 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2498         }
2499 #else /* CONFIG_PPC_BOOK3S_64 */
2500         default:
2501                 r = -ENOTTY;
2502 #endif
2503         }
2504 out:
2505         return r;
2506 }
2507
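     /*
      * LPID (logical partition ID) allocator shared by the KVM backends.
      * Guest LPIDs run from 1 to nr_lpids - 1; LPID 0 is reserved for the
      * host.
      */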
2508 static DEFINE_IDA(lpid_inuse);
2509 static unsigned long nr_lpids;
2510
2511 long kvmppc_alloc_lpid(void)
2512 {
2513         int lpid;
2514
2515         /* The host LPID must always be 0 (allocation starts at 1) */
2516         lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
2517         if (lpid < 0) {
2518                 if (lpid == -ENOMEM)
2519                         pr_err("%s: Out of memory\n", __func__);
2520                 else
2521                         pr_err("%s: No LPIDs free\n", __func__);
2522                 return -ENOMEM;
2523         }
2524
2525         return lpid;
2526 }
2527 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2528
2529 void kvmppc_free_lpid(long lpid)
2530 {
2531         ida_free(&lpid_inuse, lpid);
2532 }
2533 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2534
2535 /* nr_lpids_param includes the host LPID */
2536 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2537 {
2538         nr_lpids = nr_lpids_param;
2539 }
2540 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2541
2542 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2543
2544 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2545 {
2546         if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2547                 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2548 }
2549
2550 int kvm_arch_create_vm_debugfs(struct kvm *kvm)
2551 {
2552         if (kvm->arch.kvm_ops->create_vm_debugfs)
2553                 kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2554         return 0;
2555 }