/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
21 #include <linux/kvm_host.h>
22 #include <linux/err.h>
23 #include <linux/slab.h>
24 #include <linux/preempt.h>
25 #include <linux/sched.h>
26 #include <linux/delay.h>
27 #include <linux/export.h>
29 #include <linux/anon_inodes.h>
30 #include <linux/cpu.h>
31 #include <linux/cpumask.h>
32 #include <linux/spinlock.h>
33 #include <linux/page-flags.h>
34 #include <linux/srcu.h>
35 #include <linux/miscdevice.h>
36 #include <linux/debugfs.h>
39 #include <asm/cputable.h>
40 #include <asm/cacheflush.h>
41 #include <asm/tlbflush.h>
42 #include <asm/uaccess.h>
44 #include <asm/kvm_ppc.h>
45 #include <asm/kvm_book3s.h>
46 #include <asm/mmu_context.h>
47 #include <asm/lppaca.h>
48 #include <asm/processor.h>
49 #include <asm/cputhreads.h>
51 #include <asm/hvcall.h>
52 #include <asm/switch_to.h>
54 #include <asm/dbell.h>
56 #include <linux/gfp.h>
57 #include <linux/vmalloc.h>
58 #include <linux/highmem.h>
59 #include <linux/hugetlb.h>
60 #include <linux/module.h>
64 #define CREATE_TRACE_POINTS
67 /* #define EXIT_DEBUG */
68 /* #define EXIT_DEBUG_SIMPLE */
69 /* #define EXIT_DEBUG_INT */
71 /* Used to indicate that a guest page fault needs to be handled */
72 #define RESUME_PAGE_FAULT (RESUME_GUEST | RESUME_FLAG_ARCH1)
74 /* Used as a "null" value for timebase values */
75 #define TB_NIL (~(u64)0)
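/*
 * Default set of hcalls enabled for newly created VMs (inferred from the
 * name; the per-VM copy is kvm->arch.enabled_hcalls).  Hcall numbers are
 * multiples of 4, hence the MAX_HCALL_OPCODE/4 + 1 sizing and the
 * "req / 4" bit index used when the bitmap is tested in
 * kvmppc_pseries_do_hcall().
 */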
77 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
79 static int dynamic_mt_modes = 6;
80 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
81 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
82 static int target_smt_mode;
83 module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
84 MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
#ifdef CONFIG_KVM_XICS
static struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif
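/*
 * Usage sketch (not from this file): the parameters above are declared with
 * S_IWUSR, so they can be set at module load time or changed later through
 * sysfs, e.g.
 *
 *   modprobe kvm_hv dynamic_mt_modes=4 target_smt_mode=2
 *   echo 6 > /sys/module/kvm_hv/parameters/dynamic_mt_modes
 *
 * assuming the code is built as the kvm_hv module.
 */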
97 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
98 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
100 static bool kvmppc_ipi_thread(int cpu)
102 /* On POWER8 for IPIs to threads in the same core, use msgsnd */
103 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
105 if (cpu_first_thread_sibling(cpu) ==
106 cpu_first_thread_sibling(smp_processor_id())) {
107 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
108 msg |= cpu_thread_in_core(cpu);
110 __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
117 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
118 if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
127 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
130 struct swait_queue_head *wqp;
132 wqp = kvm_arch_vcpu_wq(vcpu);
133 if (swait_active(wqp)) {
135 ++vcpu->stat.halt_wakeup;
138 if (kvmppc_ipi_thread(vcpu->arch.thread_cpu))
141 /* CPU points to the first thread of the core */
143 if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
144 smp_send_reschedule(cpu);
/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock.
 * The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */
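/*
 * A concrete (illustrative, not exhaustive) example of the split described
 * above: if the task running the vcore is preempted for 100 timebase ticks
 * while the vcore is runnable, those ticks accumulate in vc->stolen_tb;
 * if a vcpu in state BUSY_IN_HOST is preempted for 50 ticks while its task
 * is, say, servicing a page fault, those ticks accumulate in that vcpu's
 * busy_stolen instead.
 */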
180 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
184 spin_lock_irqsave(&vc->stoltb_lock, flags);
185 vc->preempt_tb = mftb();
186 spin_unlock_irqrestore(&vc->stoltb_lock, flags);
189 static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
193 spin_lock_irqsave(&vc->stoltb_lock, flags);
194 if (vc->preempt_tb != TB_NIL) {
195 vc->stolen_tb += mftb() - vc->preempt_tb;
196 vc->preempt_tb = TB_NIL;
198 spin_unlock_irqrestore(&vc->stoltb_lock, flags);
201 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
203 struct kvmppc_vcore *vc = vcpu->arch.vcore;
207 * We can test vc->runner without taking the vcore lock,
208 * because only this task ever sets vc->runner to this
209 * vcpu, and once it is set to this vcpu, only this task
210 * ever sets it to NULL.
212 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
213 kvmppc_core_end_stolen(vc);
215 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
216 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
217 vcpu->arch.busy_preempt != TB_NIL) {
218 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
219 vcpu->arch.busy_preempt = TB_NIL;
221 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
224 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
226 struct kvmppc_vcore *vc = vcpu->arch.vcore;
229 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
230 kvmppc_core_start_stolen(vc);
232 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
233 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
234 vcpu->arch.busy_preempt = mftb();
235 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
238 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
247 kvmppc_end_cede(vcpu);
250 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
252 vcpu->arch.pvr = pvr;
255 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
257 unsigned long pcr = 0;
258 struct kvmppc_vcore *vc = vcpu->arch.vcore;
261 switch (arch_compat) {
264 * If an arch bit is set in PCR, all the defined
265 * higher-order arch bits also have to be set.
267 pcr = PCR_ARCH_206 | PCR_ARCH_205;
279 if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
280 /* POWER7 can't emulate POWER8 */
281 if (!(pcr & PCR_ARCH_206))
283 pcr &= ~PCR_ARCH_206;
287 spin_lock(&vc->lock);
288 vc->arch_compat = arch_compat;
290 spin_unlock(&vc->lock);
295 static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
299 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
300 pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
301 vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
302 for (r = 0; r < 16; ++r)
303 pr_err("r%2d = %.16lx r%d = %.16lx\n",
304 r, kvmppc_get_gpr(vcpu, r),
305 r+16, kvmppc_get_gpr(vcpu, r+16));
306 pr_err("ctr = %.16lx lr = %.16lx\n",
307 vcpu->arch.ctr, vcpu->arch.lr);
308 pr_err("srr0 = %.16llx srr1 = %.16llx\n",
309 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
310 pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
311 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
312 pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
313 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
314 pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
315 vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
316 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
317 pr_err("fault dar = %.16lx dsisr = %.8x\n",
318 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
319 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
320 for (r = 0; r < vcpu->arch.slb_max; ++r)
321 pr_err(" ESID = %.16llx VSID = %.16llx\n",
322 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
323 pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
324 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
325 vcpu->arch.last_inst);
328 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
330 struct kvm_vcpu *ret;
332 mutex_lock(&kvm->lock);
333 ret = kvm_get_vcpu_by_id(kvm, id);
334 mutex_unlock(&kvm->lock);
338 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
340 vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
341 vpa->yield_count = cpu_to_be32(1);
344 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
345 unsigned long addr, unsigned long len)
347 /* check address is cacheline aligned */
348 if (addr & (L1_CACHE_BYTES - 1))
350 spin_lock(&vcpu->arch.vpa_update_lock);
351 if (v->next_gpa != addr || v->len != len) {
353 v->len = addr ? len : 0;
354 v->update_pending = 1;
356 spin_unlock(&vcpu->arch.vpa_update_lock);
360 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
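/*
 * Minimal sketch of the buffer header implied by the comment above and by
 * the length.hword/length.word accesses in do_h_register_vpa() below; the
 * field names are assumptions rather than names taken from the PAPR spec.
 */
struct reg_vpa {
	u32 dummy;		/* the length lives at offset 4 */
	union {
		__be16 hword;	/* VPA registration passes a 2-byte length */
		__be32 word;	/* DTL/SLB shadow pass a 4-byte length */
	} length;
};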
369 static int vpa_is_registered(struct kvmppc_vpa *vpap)
371 if (vpap->update_pending)
372 return vpap->next_gpa != 0;
373 return vpap->pinned_addr != NULL;
376 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
378 unsigned long vcpuid, unsigned long vpa)
380 struct kvm *kvm = vcpu->kvm;
381 unsigned long len, nb;
383 struct kvm_vcpu *tvcpu;
386 struct kvmppc_vpa *vpap;
388 tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
392 subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
393 if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
394 subfunc == H_VPA_REG_SLB) {
395 /* Registering new area - address must be cache-line aligned */
396 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
399 /* convert logical addr to kernel addr and read length */
400 va = kvmppc_pin_guest_page(kvm, vpa, &nb);
403 if (subfunc == H_VPA_REG_VPA)
404 len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
406 len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
407 kvmppc_unpin_guest_page(kvm, va, vpa, false);
410 if (len > nb || len < sizeof(struct reg_vpa))
419 spin_lock(&tvcpu->arch.vpa_update_lock);
422 case H_VPA_REG_VPA: /* register VPA */
423 if (len < sizeof(struct lppaca))
425 vpap = &tvcpu->arch.vpa;
429 case H_VPA_REG_DTL: /* register DTL */
430 if (len < sizeof(struct dtl_entry))
432 len -= len % sizeof(struct dtl_entry);
434 /* Check that they have previously registered a VPA */
436 if (!vpa_is_registered(&tvcpu->arch.vpa))
439 vpap = &tvcpu->arch.dtl;
443 case H_VPA_REG_SLB: /* register SLB shadow buffer */
444 /* Check that they have previously registered a VPA */
446 if (!vpa_is_registered(&tvcpu->arch.vpa))
449 vpap = &tvcpu->arch.slb_shadow;
453 case H_VPA_DEREG_VPA: /* deregister VPA */
454 /* Check they don't still have a DTL or SLB buf registered */
456 if (vpa_is_registered(&tvcpu->arch.dtl) ||
457 vpa_is_registered(&tvcpu->arch.slb_shadow))
460 vpap = &tvcpu->arch.vpa;
464 case H_VPA_DEREG_DTL: /* deregister DTL */
465 vpap = &tvcpu->arch.dtl;
469 case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */
470 vpap = &tvcpu->arch.slb_shadow;
476 vpap->next_gpa = vpa;
478 vpap->update_pending = 1;
481 spin_unlock(&tvcpu->arch.vpa_update_lock);
486 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
488 struct kvm *kvm = vcpu->kvm;
	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
502 gpa = vpap->next_gpa;
503 spin_unlock(&vcpu->arch.vpa_update_lock);
507 va = kvmppc_pin_guest_page(kvm, gpa, &nb);
508 spin_lock(&vcpu->arch.vpa_update_lock);
509 if (gpa == vpap->next_gpa)
511 /* sigh... unpin that one and try again */
513 kvmppc_unpin_guest_page(kvm, va, gpa, false);
516 vpap->update_pending = 0;
517 if (va && nb < vpap->len) {
519 * If it's now too short, it must be that userspace
520 * has changed the mappings underlying guest memory,
521 * so unregister the region.
523 kvmppc_unpin_guest_page(kvm, va, gpa, false);
526 if (vpap->pinned_addr)
527 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
530 vpap->pinned_addr = va;
533 vpap->pinned_end = va + vpap->len;
536 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
538 if (!(vcpu->arch.vpa.update_pending ||
539 vcpu->arch.slb_shadow.update_pending ||
540 vcpu->arch.dtl.update_pending))
543 spin_lock(&vcpu->arch.vpa_update_lock);
544 if (vcpu->arch.vpa.update_pending) {
545 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
546 if (vcpu->arch.vpa.pinned_addr)
547 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
549 if (vcpu->arch.dtl.update_pending) {
550 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
551 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
552 vcpu->arch.dtl_index = 0;
554 if (vcpu->arch.slb_shadow.update_pending)
555 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
556 spin_unlock(&vcpu->arch.vpa_update_lock);
560 * Return the accumulated stolen time for the vcore up until `now'.
561 * The caller should hold the vcore lock.
563 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
568 spin_lock_irqsave(&vc->stoltb_lock, flags);
570 if (vc->vcore_state != VCORE_INACTIVE &&
571 vc->preempt_tb != TB_NIL)
572 p += now - vc->preempt_tb;
573 spin_unlock_irqrestore(&vc->stoltb_lock, flags);
577 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
578 struct kvmppc_vcore *vc)
580 struct dtl_entry *dt;
582 unsigned long stolen;
583 unsigned long core_stolen;
586 dt = vcpu->arch.dtl_ptr;
587 vpa = vcpu->arch.vpa.pinned_addr;
589 core_stolen = vcore_stolen_time(vc, now);
590 stolen = core_stolen - vcpu->arch.stolen_logged;
591 vcpu->arch.stolen_logged = core_stolen;
592 spin_lock_irq(&vcpu->arch.tbacct_lock);
593 stolen += vcpu->arch.busy_stolen;
594 vcpu->arch.busy_stolen = 0;
595 spin_unlock_irq(&vcpu->arch.tbacct_lock);
598 memset(dt, 0, sizeof(struct dtl_entry));
599 dt->dispatch_reason = 7;
600 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
601 dt->timebase = cpu_to_be64(now + vc->tb_offset);
602 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
603 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
604 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
606 if (dt == vcpu->arch.dtl.pinned_end)
607 dt = vcpu->arch.dtl.pinned_addr;
608 vcpu->arch.dtl_ptr = dt;
609 /* order writing *dt vs. writing vpa->dtl_idx */
611 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
612 vcpu->arch.dtl.dirty = true;
615 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
617 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
619 if ((!vcpu->arch.vcore->arch_compat) &&
620 cpu_has_feature(CPU_FTR_ARCH_207S))
625 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
626 unsigned long resource, unsigned long value1,
627 unsigned long value2)
630 case H_SET_MODE_RESOURCE_SET_CIABR:
631 if (!kvmppc_power8_compatible(vcpu))
636 return H_UNSUPPORTED_FLAG_START;
637 /* Guests can't breakpoint the hypervisor */
638 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
640 vcpu->arch.ciabr = value1;
642 case H_SET_MODE_RESOURCE_SET_DAWR:
643 if (!kvmppc_power8_compatible(vcpu))
646 return H_UNSUPPORTED_FLAG_START;
647 if (value2 & DABRX_HYP)
649 vcpu->arch.dawr = value1;
650 vcpu->arch.dawrx = value2;
657 static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
659 struct kvmppc_vcore *vcore = target->arch.vcore;
	/*
	 * We expect to have been called by the real mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 */
669 spin_lock(&vcore->lock);
670 if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
671 vcore->vcore_state != VCORE_INACTIVE &&
673 target = vcore->runner;
674 spin_unlock(&vcore->lock);
676 return kvm_vcpu_yield_to(target);
679 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
682 struct lppaca *lppaca;
684 spin_lock(&vcpu->arch.vpa_update_lock);
685 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
687 yield_count = be32_to_cpu(lppaca->yield_count);
688 spin_unlock(&vcpu->arch.vpa_update_lock);
692 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
694 unsigned long req = kvmppc_get_gpr(vcpu, 3);
695 unsigned long target, ret = H_SUCCESS;
697 struct kvm_vcpu *tvcpu;
700 if (req <= MAX_HCALL_OPCODE &&
701 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
708 target = kvmppc_get_gpr(vcpu, 4);
709 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
714 tvcpu->arch.prodded = 1;
716 if (vcpu->arch.ceded) {
717 if (swait_active(&vcpu->wq)) {
719 vcpu->stat.halt_wakeup++;
724 target = kvmppc_get_gpr(vcpu, 4);
727 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
732 yield_count = kvmppc_get_gpr(vcpu, 5);
733 if (kvmppc_get_yield_count(tvcpu) != yield_count)
735 kvm_arch_vcpu_yield_to(tvcpu);
738 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
739 kvmppc_get_gpr(vcpu, 5),
740 kvmppc_get_gpr(vcpu, 6));
743 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
746 idx = srcu_read_lock(&vcpu->kvm->srcu);
747 rc = kvmppc_rtas_hcall(vcpu);
748 srcu_read_unlock(&vcpu->kvm->srcu, idx);
755 /* Send the error out to userspace via KVM_RUN */
757 case H_LOGICAL_CI_LOAD:
758 ret = kvmppc_h_logical_ci_load(vcpu);
759 if (ret == H_TOO_HARD)
762 case H_LOGICAL_CI_STORE:
763 ret = kvmppc_h_logical_ci_store(vcpu);
764 if (ret == H_TOO_HARD)
768 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
769 kvmppc_get_gpr(vcpu, 5),
770 kvmppc_get_gpr(vcpu, 6),
771 kvmppc_get_gpr(vcpu, 7));
772 if (ret == H_TOO_HARD)
781 if (kvmppc_xics_enabled(vcpu)) {
782 ret = kvmppc_xics_hcall(vcpu, req);
787 ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
788 kvmppc_get_gpr(vcpu, 5),
789 kvmppc_get_gpr(vcpu, 6));
790 if (ret == H_TOO_HARD)
793 case H_PUT_TCE_INDIRECT:
794 ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
795 kvmppc_get_gpr(vcpu, 5),
796 kvmppc_get_gpr(vcpu, 6),
797 kvmppc_get_gpr(vcpu, 7));
798 if (ret == H_TOO_HARD)
802 ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
803 kvmppc_get_gpr(vcpu, 5),
804 kvmppc_get_gpr(vcpu, 6),
805 kvmppc_get_gpr(vcpu, 7));
806 if (ret == H_TOO_HARD)
812 kvmppc_set_gpr(vcpu, 3, ret);
813 vcpu->arch.hcall_needed = 0;
817 static int kvmppc_hcall_impl_hv(unsigned long cmd)
825 case H_LOGICAL_CI_LOAD:
826 case H_LOGICAL_CI_STORE:
827 #ifdef CONFIG_KVM_XICS
838 /* See if it's in the real-mode table */
839 return kvmppc_hcall_impl_hv_realmode(cmd);
842 static int kvmppc_emulate_debug_inst(struct kvm_run *run,
843 struct kvm_vcpu *vcpu)
847 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
850 * Fetch failed, so return to guest and
851 * try executing it again.
856 if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
857 run->exit_reason = KVM_EXIT_DEBUG;
858 run->debug.arch.address = kvmppc_get_pc(vcpu);
861 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
866 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
867 struct task_struct *tsk)
871 vcpu->stat.sum_exits++;
874 * This can happen if an interrupt occurs in the last stages
875 * of guest entry or the first stages of guest exit (i.e. after
876 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
877 * and before setting it to KVM_GUEST_MODE_HOST_HV).
878 * That can happen due to a bug, or due to a machine check
879 * occurring at just the wrong time.
881 if (vcpu->arch.shregs.msr & MSR_HV) {
882 printk(KERN_EMERG "KVM trap in HV mode!\n");
883 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
884 vcpu->arch.trap, kvmppc_get_pc(vcpu),
885 vcpu->arch.shregs.msr);
886 kvmppc_dump_regs(vcpu);
887 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
888 run->hw.hardware_exit_reason = vcpu->arch.trap;
891 run->exit_reason = KVM_EXIT_UNKNOWN;
892 run->ready_for_interrupt_injection = 1;
893 switch (vcpu->arch.trap) {
894 /* We're good on these - the host merely wanted to get our attention */
895 case BOOK3S_INTERRUPT_HV_DECREMENTER:
896 vcpu->stat.dec_exits++;
899 case BOOK3S_INTERRUPT_EXTERNAL:
900 case BOOK3S_INTERRUPT_H_DOORBELL:
901 vcpu->stat.ext_intr_exits++;
	/* HMI is hypervisor interrupt and host has handled it. Resume guest. */
905 case BOOK3S_INTERRUPT_HMI:
906 case BOOK3S_INTERRUPT_PERFMON:
909 case BOOK3S_INTERRUPT_MACHINE_CHECK:
911 * Deliver a machine check interrupt to the guest.
912 * We have to do this, even if the host has handled the
913 * machine check, because machine checks use SRR0/1 and
914 * the interrupt might have trashed guest state in them.
916 kvmppc_book3s_queue_irqprio(vcpu,
917 BOOK3S_INTERRUPT_MACHINE_CHECK);
920 case BOOK3S_INTERRUPT_PROGRAM:
924 * Normally program interrupts are delivered directly
925 * to the guest by the hardware, but we can get here
926 * as a result of a hypervisor emulation interrupt
927 * (e40) getting turned into a 700 by BML RTAS.
929 flags = vcpu->arch.shregs.msr & 0x1f0000ull;
930 kvmppc_core_queue_program(vcpu, flags);
934 case BOOK3S_INTERRUPT_SYSCALL:
936 /* hcall - punt to userspace */
		/* hypercall with MSR_PR has already been handled in rmode,
		 * and never reaches here.
		 */
943 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
944 for (i = 0; i < 9; ++i)
945 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
946 run->exit_reason = KVM_EXIT_PAPR_HCALL;
947 vcpu->arch.hcall_needed = 1;
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
958 case BOOK3S_INTERRUPT_H_DATA_STORAGE:
959 r = RESUME_PAGE_FAULT;
961 case BOOK3S_INTERRUPT_H_INST_STORAGE:
962 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
963 vcpu->arch.fault_dsisr = 0;
964 r = RESUME_PAGE_FAULT;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If guest debug is disabled, generate a program interrupt
	 * to the guest.  If guest debug is enabled, we need to check
	 * whether the instruction is a software breakpoint instruction,
	 * and return to the guest or the host accordingly.
	 */
973 case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
974 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
975 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
976 swab32(vcpu->arch.emul_inst) :
977 vcpu->arch.emul_inst;
978 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
979 r = kvmppc_emulate_debug_inst(run, vcpu);
981 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
	/*
	 * This occurs if the guest (kernel or userspace) does something that
	 * is prohibited by HFSCR.  We just generate a program interrupt to
	 * the guest.
	 */
991 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
995 kvmppc_dump_regs(vcpu);
996 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
997 vcpu->arch.trap, kvmppc_get_pc(vcpu),
998 vcpu->arch.shregs.msr);
999 run->hw.hardware_exit_reason = vcpu->arch.trap;
1007 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
1008 struct kvm_sregs *sregs)
1012 memset(sregs, 0, sizeof(struct kvm_sregs));
1013 sregs->pvr = vcpu->arch.pvr;
1014 for (i = 0; i < vcpu->arch.slb_max; i++) {
1015 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
1016 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1022 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
1023 struct kvm_sregs *sregs)
1027 /* Only accept the same PVR as the host's, since we can't spoof it */
1028 if (sregs->pvr != vcpu->arch.pvr)
1032 for (i = 0; i < vcpu->arch.slb_nr; i++) {
1033 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
1034 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
1035 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
1039 vcpu->arch.slb_max = j;
1044 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
1045 bool preserve_top32)
1047 struct kvm *kvm = vcpu->kvm;
1048 struct kvmppc_vcore *vc = vcpu->arch.vcore;
1051 mutex_lock(&kvm->lock);
1052 spin_lock(&vc->lock);
1054 * If ILE (interrupt little-endian) has changed, update the
1055 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
1057 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
1058 struct kvm_vcpu *vcpu;
1061 kvm_for_each_vcpu(i, vcpu, kvm) {
1062 if (vcpu->arch.vcore != vc)
1064 if (new_lpcr & LPCR_ILE)
1065 vcpu->arch.intr_msr |= MSR_LE;
1067 vcpu->arch.intr_msr &= ~MSR_LE;
1072 * Userspace can only modify DPFD (default prefetch depth),
1073 * ILE (interrupt little-endian) and TC (translation control).
1074 * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
1076 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
1077 if (cpu_has_feature(CPU_FTR_ARCH_207S))
1080 /* Broken 32-bit version of LPCR must not clear top bits */
1083 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
1084 spin_unlock(&vc->lock);
1085 mutex_unlock(&kvm->lock);
1088 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1089 union kvmppc_one_reg *val)
1095 case KVM_REG_PPC_DEBUG_INST:
1096 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1098 case KVM_REG_PPC_HIOR:
1099 *val = get_reg_val(id, 0);
1101 case KVM_REG_PPC_DABR:
1102 *val = get_reg_val(id, vcpu->arch.dabr);
1104 case KVM_REG_PPC_DABRX:
1105 *val = get_reg_val(id, vcpu->arch.dabrx);
1107 case KVM_REG_PPC_DSCR:
1108 *val = get_reg_val(id, vcpu->arch.dscr);
1110 case KVM_REG_PPC_PURR:
1111 *val = get_reg_val(id, vcpu->arch.purr);
1113 case KVM_REG_PPC_SPURR:
1114 *val = get_reg_val(id, vcpu->arch.spurr);
1116 case KVM_REG_PPC_AMR:
1117 *val = get_reg_val(id, vcpu->arch.amr);
1119 case KVM_REG_PPC_UAMOR:
1120 *val = get_reg_val(id, vcpu->arch.uamor);
1122 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
1123 i = id - KVM_REG_PPC_MMCR0;
1124 *val = get_reg_val(id, vcpu->arch.mmcr[i]);
1126 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1127 i = id - KVM_REG_PPC_PMC1;
1128 *val = get_reg_val(id, vcpu->arch.pmc[i]);
1130 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1131 i = id - KVM_REG_PPC_SPMC1;
1132 *val = get_reg_val(id, vcpu->arch.spmc[i]);
1134 case KVM_REG_PPC_SIAR:
1135 *val = get_reg_val(id, vcpu->arch.siar);
1137 case KVM_REG_PPC_SDAR:
1138 *val = get_reg_val(id, vcpu->arch.sdar);
1140 case KVM_REG_PPC_SIER:
1141 *val = get_reg_val(id, vcpu->arch.sier);
1143 case KVM_REG_PPC_IAMR:
1144 *val = get_reg_val(id, vcpu->arch.iamr);
1146 case KVM_REG_PPC_PSPB:
1147 *val = get_reg_val(id, vcpu->arch.pspb);
1149 case KVM_REG_PPC_DPDES:
1150 *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
1152 case KVM_REG_PPC_DAWR:
1153 *val = get_reg_val(id, vcpu->arch.dawr);
1155 case KVM_REG_PPC_DAWRX:
1156 *val = get_reg_val(id, vcpu->arch.dawrx);
1158 case KVM_REG_PPC_CIABR:
1159 *val = get_reg_val(id, vcpu->arch.ciabr);
1161 case KVM_REG_PPC_CSIGR:
1162 *val = get_reg_val(id, vcpu->arch.csigr);
1164 case KVM_REG_PPC_TACR:
1165 *val = get_reg_val(id, vcpu->arch.tacr);
1167 case KVM_REG_PPC_TCSCR:
1168 *val = get_reg_val(id, vcpu->arch.tcscr);
1170 case KVM_REG_PPC_PID:
1171 *val = get_reg_val(id, vcpu->arch.pid);
1173 case KVM_REG_PPC_ACOP:
1174 *val = get_reg_val(id, vcpu->arch.acop);
1176 case KVM_REG_PPC_WORT:
1177 *val = get_reg_val(id, vcpu->arch.wort);
1179 case KVM_REG_PPC_VPA_ADDR:
1180 spin_lock(&vcpu->arch.vpa_update_lock);
1181 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
1182 spin_unlock(&vcpu->arch.vpa_update_lock);
1184 case KVM_REG_PPC_VPA_SLB:
1185 spin_lock(&vcpu->arch.vpa_update_lock);
1186 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
1187 val->vpaval.length = vcpu->arch.slb_shadow.len;
1188 spin_unlock(&vcpu->arch.vpa_update_lock);
1190 case KVM_REG_PPC_VPA_DTL:
1191 spin_lock(&vcpu->arch.vpa_update_lock);
1192 val->vpaval.addr = vcpu->arch.dtl.next_gpa;
1193 val->vpaval.length = vcpu->arch.dtl.len;
1194 spin_unlock(&vcpu->arch.vpa_update_lock);
1196 case KVM_REG_PPC_TB_OFFSET:
1197 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
1199 case KVM_REG_PPC_LPCR:
1200 case KVM_REG_PPC_LPCR_64:
1201 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
1203 case KVM_REG_PPC_PPR:
1204 *val = get_reg_val(id, vcpu->arch.ppr);
1206 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1207 case KVM_REG_PPC_TFHAR:
1208 *val = get_reg_val(id, vcpu->arch.tfhar);
1210 case KVM_REG_PPC_TFIAR:
1211 *val = get_reg_val(id, vcpu->arch.tfiar);
1213 case KVM_REG_PPC_TEXASR:
1214 *val = get_reg_val(id, vcpu->arch.texasr);
1216 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1217 i = id - KVM_REG_PPC_TM_GPR0;
1218 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
1220 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1223 i = id - KVM_REG_PPC_TM_VSR0;
1225 for (j = 0; j < TS_FPRWIDTH; j++)
1226 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1228 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1229 val->vval = vcpu->arch.vr_tm.vr[i-32];
1235 case KVM_REG_PPC_TM_CR:
1236 *val = get_reg_val(id, vcpu->arch.cr_tm);
1238 case KVM_REG_PPC_TM_LR:
1239 *val = get_reg_val(id, vcpu->arch.lr_tm);
1241 case KVM_REG_PPC_TM_CTR:
1242 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1244 case KVM_REG_PPC_TM_FPSCR:
1245 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1247 case KVM_REG_PPC_TM_AMR:
1248 *val = get_reg_val(id, vcpu->arch.amr_tm);
1250 case KVM_REG_PPC_TM_PPR:
1251 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1253 case KVM_REG_PPC_TM_VRSAVE:
1254 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1256 case KVM_REG_PPC_TM_VSCR:
1257 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1258 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1262 case KVM_REG_PPC_TM_DSCR:
1263 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1265 case KVM_REG_PPC_TM_TAR:
1266 *val = get_reg_val(id, vcpu->arch.tar_tm);
1269 case KVM_REG_PPC_ARCH_COMPAT:
1270 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
1280 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1281 union kvmppc_one_reg *val)
1285 unsigned long addr, len;
1288 case KVM_REG_PPC_HIOR:
1289 /* Only allow this to be set to zero */
1290 if (set_reg_val(id, *val))
1293 case KVM_REG_PPC_DABR:
1294 vcpu->arch.dabr = set_reg_val(id, *val);
1296 case KVM_REG_PPC_DABRX:
1297 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1299 case KVM_REG_PPC_DSCR:
1300 vcpu->arch.dscr = set_reg_val(id, *val);
1302 case KVM_REG_PPC_PURR:
1303 vcpu->arch.purr = set_reg_val(id, *val);
1305 case KVM_REG_PPC_SPURR:
1306 vcpu->arch.spurr = set_reg_val(id, *val);
1308 case KVM_REG_PPC_AMR:
1309 vcpu->arch.amr = set_reg_val(id, *val);
1311 case KVM_REG_PPC_UAMOR:
1312 vcpu->arch.uamor = set_reg_val(id, *val);
1314 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
1315 i = id - KVM_REG_PPC_MMCR0;
1316 vcpu->arch.mmcr[i] = set_reg_val(id, *val);
1318 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1319 i = id - KVM_REG_PPC_PMC1;
1320 vcpu->arch.pmc[i] = set_reg_val(id, *val);
1322 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1323 i = id - KVM_REG_PPC_SPMC1;
1324 vcpu->arch.spmc[i] = set_reg_val(id, *val);
1326 case KVM_REG_PPC_SIAR:
1327 vcpu->arch.siar = set_reg_val(id, *val);
1329 case KVM_REG_PPC_SDAR:
1330 vcpu->arch.sdar = set_reg_val(id, *val);
1332 case KVM_REG_PPC_SIER:
1333 vcpu->arch.sier = set_reg_val(id, *val);
1335 case KVM_REG_PPC_IAMR:
1336 vcpu->arch.iamr = set_reg_val(id, *val);
1338 case KVM_REG_PPC_PSPB:
1339 vcpu->arch.pspb = set_reg_val(id, *val);
1341 case KVM_REG_PPC_DPDES:
1342 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1344 case KVM_REG_PPC_DAWR:
1345 vcpu->arch.dawr = set_reg_val(id, *val);
1347 case KVM_REG_PPC_DAWRX:
1348 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1350 case KVM_REG_PPC_CIABR:
1351 vcpu->arch.ciabr = set_reg_val(id, *val);
1352 /* Don't allow setting breakpoints in hypervisor code */
1353 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1354 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
1356 case KVM_REG_PPC_CSIGR:
1357 vcpu->arch.csigr = set_reg_val(id, *val);
1359 case KVM_REG_PPC_TACR:
1360 vcpu->arch.tacr = set_reg_val(id, *val);
1362 case KVM_REG_PPC_TCSCR:
1363 vcpu->arch.tcscr = set_reg_val(id, *val);
1365 case KVM_REG_PPC_PID:
1366 vcpu->arch.pid = set_reg_val(id, *val);
1368 case KVM_REG_PPC_ACOP:
1369 vcpu->arch.acop = set_reg_val(id, *val);
1371 case KVM_REG_PPC_WORT:
1372 vcpu->arch.wort = set_reg_val(id, *val);
1374 case KVM_REG_PPC_VPA_ADDR:
1375 addr = set_reg_val(id, *val);
1377 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
1378 vcpu->arch.dtl.next_gpa))
1380 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
1382 case KVM_REG_PPC_VPA_SLB:
1383 addr = val->vpaval.addr;
1384 len = val->vpaval.length;
1386 if (addr && !vcpu->arch.vpa.next_gpa)
1388 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
1390 case KVM_REG_PPC_VPA_DTL:
1391 addr = val->vpaval.addr;
1392 len = val->vpaval.length;
1394 if (addr && (len < sizeof(struct dtl_entry) ||
1395 !vcpu->arch.vpa.next_gpa))
1397 len -= len % sizeof(struct dtl_entry);
1398 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
1400 case KVM_REG_PPC_TB_OFFSET:
1401 /* round up to multiple of 2^24 */
1402 vcpu->arch.vcore->tb_offset =
1403 ALIGN(set_reg_val(id, *val), 1UL << 24);
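		/*
		 * The 2^24 alignment is presumably because the offset is
		 * applied through the TBU40 facility, which only sets the
		 * upper 40 bits of the timebase, so the low 24 bits of the
		 * offset must be zero (an assumption; the reason is not
		 * stated in this file).
		 */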
1405 case KVM_REG_PPC_LPCR:
1406 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
1408 case KVM_REG_PPC_LPCR_64:
1409 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
1411 case KVM_REG_PPC_PPR:
1412 vcpu->arch.ppr = set_reg_val(id, *val);
1414 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1415 case KVM_REG_PPC_TFHAR:
1416 vcpu->arch.tfhar = set_reg_val(id, *val);
1418 case KVM_REG_PPC_TFIAR:
1419 vcpu->arch.tfiar = set_reg_val(id, *val);
1421 case KVM_REG_PPC_TEXASR:
1422 vcpu->arch.texasr = set_reg_val(id, *val);
1424 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1425 i = id - KVM_REG_PPC_TM_GPR0;
1426 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
1428 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1431 i = id - KVM_REG_PPC_TM_VSR0;
1433 for (j = 0; j < TS_FPRWIDTH; j++)
1434 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1436 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1437 vcpu->arch.vr_tm.vr[i-32] = val->vval;
1442 case KVM_REG_PPC_TM_CR:
1443 vcpu->arch.cr_tm = set_reg_val(id, *val);
1445 case KVM_REG_PPC_TM_LR:
1446 vcpu->arch.lr_tm = set_reg_val(id, *val);
1448 case KVM_REG_PPC_TM_CTR:
1449 vcpu->arch.ctr_tm = set_reg_val(id, *val);
1451 case KVM_REG_PPC_TM_FPSCR:
1452 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1454 case KVM_REG_PPC_TM_AMR:
1455 vcpu->arch.amr_tm = set_reg_val(id, *val);
1457 case KVM_REG_PPC_TM_PPR:
1458 vcpu->arch.ppr_tm = set_reg_val(id, *val);
1460 case KVM_REG_PPC_TM_VRSAVE:
1461 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1463 case KVM_REG_PPC_TM_VSCR:
1464 if (cpu_has_feature(CPU_FTR_ALTIVEC))
			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
1469 case KVM_REG_PPC_TM_DSCR:
1470 vcpu->arch.dscr_tm = set_reg_val(id, *val);
1472 case KVM_REG_PPC_TM_TAR:
1473 vcpu->arch.tar_tm = set_reg_val(id, *val);
1476 case KVM_REG_PPC_ARCH_COMPAT:
1477 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
1487 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
1489 struct kvmppc_vcore *vcore;
1491 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
1496 INIT_LIST_HEAD(&vcore->runnable_threads);
1497 spin_lock_init(&vcore->lock);
1498 spin_lock_init(&vcore->stoltb_lock);
1499 init_swait_queue_head(&vcore->wq);
1500 vcore->preempt_tb = TB_NIL;
1501 vcore->lpcr = kvm->arch.lpcr;
1502 vcore->first_vcpuid = core * threads_per_subcore;
1504 INIT_LIST_HEAD(&vcore->preempt_list);
1509 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1510 static struct debugfs_timings_element {
1514 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
1515 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
1516 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
1517 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
1518 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
1521 #define N_TIMINGS (sizeof(timings) / sizeof(timings[0]))
1523 struct debugfs_timings_state {
1524 struct kvm_vcpu *vcpu;
1525 unsigned int buflen;
1526 char buf[N_TIMINGS * 100];
1529 static int debugfs_timings_open(struct inode *inode, struct file *file)
1531 struct kvm_vcpu *vcpu = inode->i_private;
1532 struct debugfs_timings_state *p;
1534 p = kzalloc(sizeof(*p), GFP_KERNEL);
1538 kvm_get_kvm(vcpu->kvm);
1540 file->private_data = p;
1542 return nonseekable_open(inode, file);
1545 static int debugfs_timings_release(struct inode *inode, struct file *file)
1547 struct debugfs_timings_state *p = file->private_data;
1549 kvm_put_kvm(p->vcpu->kvm);
1554 static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
1555 size_t len, loff_t *ppos)
1557 struct debugfs_timings_state *p = file->private_data;
1558 struct kvm_vcpu *vcpu = p->vcpu;
1560 struct kvmhv_tb_accumulator tb;
1569 buf_end = s + sizeof(p->buf);
1570 for (i = 0; i < N_TIMINGS; ++i) {
1571 struct kvmhv_tb_accumulator *acc;
1573 acc = (struct kvmhv_tb_accumulator *)
1574 ((unsigned long)vcpu + timings[i].offset);
1576 for (loops = 0; loops < 1000; ++loops) {
1577 count = acc->seqcount;
1582 if (count == acc->seqcount) {
1590 snprintf(s, buf_end - s, "%s: stuck\n",
1593 snprintf(s, buf_end - s,
1594 "%s: %llu %llu %llu %llu\n",
1595 timings[i].name, count / 2,
1596 tb_to_ns(tb.tb_total),
1597 tb_to_ns(tb.tb_min),
1598 tb_to_ns(tb.tb_max));
1601 p->buflen = s - p->buf;
1605 if (pos >= p->buflen)
1607 if (len > p->buflen - pos)
1608 len = p->buflen - pos;
1609 n = copy_to_user(buf, p->buf + pos, len);
1619 static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
1620 size_t len, loff_t *ppos)
1625 static const struct file_operations debugfs_timings_ops = {
1626 .owner = THIS_MODULE,
1627 .open = debugfs_timings_open,
1628 .release = debugfs_timings_release,
1629 .read = debugfs_timings_read,
1630 .write = debugfs_timings_write,
1631 .llseek = generic_file_llseek,
1634 /* Create a debugfs directory for the vcpu */
1635 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1638 struct kvm *kvm = vcpu->kvm;
1640 snprintf(buf, sizeof(buf), "vcpu%u", id);
1641 if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
1643 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
1644 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
1646 vcpu->arch.debugfs_timings =
1647 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
1648 vcpu, &debugfs_timings_ops);
1651 #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1652 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1655 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1657 static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1660 struct kvm_vcpu *vcpu;
1663 struct kvmppc_vcore *vcore;
1665 core = id / threads_per_subcore;
1666 if (core >= KVM_MAX_VCORES)
1670 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1674 err = kvm_vcpu_init(vcpu, kvm, id);
1678 vcpu->arch.shared = &vcpu->arch.shregs;
1679 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1681 * The shared struct is never shared on HV,
1682 * so we can always use host endianness
1684 #ifdef __BIG_ENDIAN__
1685 vcpu->arch.shared_big_endian = true;
1687 vcpu->arch.shared_big_endian = false;
1690 vcpu->arch.mmcr[0] = MMCR0_FC;
1691 vcpu->arch.ctrl = CTRL_RUNLATCH;
1692 /* default to host PVR, since we can't spoof it */
1693 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
1694 spin_lock_init(&vcpu->arch.vpa_update_lock);
1695 spin_lock_init(&vcpu->arch.tbacct_lock);
1696 vcpu->arch.busy_preempt = TB_NIL;
1697 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
1699 kvmppc_mmu_book3s_hv_init(vcpu);
1701 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
1703 init_waitqueue_head(&vcpu->arch.cpu_run);
1705 mutex_lock(&kvm->lock);
1706 vcore = kvm->arch.vcores[core];
1708 vcore = kvmppc_vcore_create(kvm, core);
1709 kvm->arch.vcores[core] = vcore;
1710 kvm->arch.online_vcores++;
1712 mutex_unlock(&kvm->lock);
1717 spin_lock(&vcore->lock);
1718 ++vcore->num_threads;
1719 spin_unlock(&vcore->lock);
1720 vcpu->arch.vcore = vcore;
1721 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
1722 vcpu->arch.thread_cpu = -1;
1724 vcpu->arch.cpu_type = KVM_CPU_3S_64;
1725 kvmppc_sanity_check(vcpu);
1727 debugfs_vcpu_init(vcpu, id);
1732 kmem_cache_free(kvm_vcpu_cache, vcpu);
1734 return ERR_PTR(err);
1737 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
1739 if (vpa->pinned_addr)
1740 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
1744 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
1746 spin_lock(&vcpu->arch.vpa_update_lock);
1747 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
1748 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
1749 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
1750 spin_unlock(&vcpu->arch.vpa_update_lock);
1751 kvm_vcpu_uninit(vcpu);
1752 kmem_cache_free(kvm_vcpu_cache, vcpu);
1755 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
1757 /* Indicate we want to get back into the guest */
1761 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
1763 unsigned long dec_nsec, now;
1766 if (now > vcpu->arch.dec_expires) {
1767 /* decrementer has already gone negative */
1768 kvmppc_core_queue_dec(vcpu);
1769 kvmppc_core_prepare_to_enter(vcpu);
1772 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
1774 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
1776 vcpu->arch.timer_running = 1;
1779 static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
1781 vcpu->arch.ceded = 0;
1782 if (vcpu->arch.timer_running) {
1783 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
1784 vcpu->arch.timer_running = 0;
1788 extern void __kvmppc_vcore_entry(void);
1790 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
1791 struct kvm_vcpu *vcpu)
1795 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
1797 spin_lock_irq(&vcpu->arch.tbacct_lock);
1799 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
1800 vcpu->arch.stolen_logged;
1801 vcpu->arch.busy_preempt = now;
1802 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
1803 spin_unlock_irq(&vcpu->arch.tbacct_lock);
1805 list_del(&vcpu->arch.run_list);
1808 static int kvmppc_grab_hwthread(int cpu)
1810 struct paca_struct *tpaca;
1811 long timeout = 10000;
1815 /* Ensure the thread won't go into the kernel if it wakes */
1816 tpaca->kvm_hstate.kvm_vcpu = NULL;
1817 tpaca->kvm_hstate.kvm_vcore = NULL;
1818 tpaca->kvm_hstate.napping = 0;
1820 tpaca->kvm_hstate.hwthread_req = 1;
1823 * If the thread is already executing in the kernel (e.g. handling
1824 * a stray interrupt), wait for it to get back to nap mode.
1825 * The smp_mb() is to ensure that our setting of hwthread_req
1826 * is visible before we look at hwthread_state, so if this
1827 * races with the code at system_reset_pSeries and the thread
1828 * misses our setting of hwthread_req, we are sure to see its
1829 * setting of hwthread_state, and vice versa.
1832 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
1833 if (--timeout <= 0) {
1834 pr_err("KVM: couldn't grab cpu %d\n", cpu);
1842 static void kvmppc_release_hwthread(int cpu)
1844 struct paca_struct *tpaca;
1847 tpaca->kvm_hstate.hwthread_req = 0;
1848 tpaca->kvm_hstate.kvm_vcpu = NULL;
1849 tpaca->kvm_hstate.kvm_vcore = NULL;
1850 tpaca->kvm_hstate.kvm_split_mode = NULL;
1853 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
1856 struct paca_struct *tpaca;
1857 struct kvmppc_vcore *mvc = vc->master_vcore;
1861 if (vcpu->arch.timer_running) {
1862 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
1863 vcpu->arch.timer_running = 0;
1865 cpu += vcpu->arch.ptid;
1866 vcpu->cpu = mvc->pcpu;
1867 vcpu->arch.thread_cpu = cpu;
1870 tpaca->kvm_hstate.kvm_vcpu = vcpu;
1871 tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
1872 /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
1874 tpaca->kvm_hstate.kvm_vcore = mvc;
1875 if (cpu != smp_processor_id())
1876 kvmppc_ipi_thread(cpu);
1879 static void kvmppc_wait_for_nap(void)
1881 int cpu = smp_processor_id();
1884 for (loops = 0; loops < 1000000; ++loops) {
1886 * Check if all threads are finished.
1887 * We set the vcore pointer when starting a thread
1888 * and the thread clears it when finished, so we look
1889 * for any threads that still have a non-NULL vcore ptr.
1891 for (i = 1; i < threads_per_subcore; ++i)
1892 if (paca[cpu + i].kvm_hstate.kvm_vcore)
1894 if (i == threads_per_subcore) {
1901 for (i = 1; i < threads_per_subcore; ++i)
1902 if (paca[cpu + i].kvm_hstate.kvm_vcore)
1903 pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * go into the kernel.
 */
1911 static int on_primary_thread(void)
1913 int cpu = smp_processor_id();
1916 /* Are we on a primary subcore? */
1917 if (cpu_thread_in_subcore(cpu))
1921 while (++thr < threads_per_subcore)
1922 if (cpu_online(cpu + thr))
1925 /* Grab all hw threads so they can't go into the kernel */
1926 for (thr = 1; thr < threads_per_subcore; ++thr) {
1927 if (kvmppc_grab_hwthread(cpu + thr)) {
1928 /* Couldn't grab one; let the others go */
1930 kvmppc_release_hwthread(cpu + thr);
1931 } while (--thr > 0);
1939 * A list of virtual cores for each physical CPU.
1940 * These are vcores that could run but their runner VCPU tasks are
1941 * (or may be) preempted.
struct preempted_vcore_list {
	struct list_head	list;
	spinlock_t		lock;
};

static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
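/*
 * lp->lock protects lp->list.  Vcores are added to the list in
 * kvmppc_vcore_preempt() and removed either in kvmppc_vcore_end_preempt()
 * or when another runner picks them up in collect_piggybacks().
 */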
1950 static void init_vcore_lists(void)
1954 for_each_possible_cpu(cpu) {
1955 struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
1956 spin_lock_init(&lp->lock);
1957 INIT_LIST_HEAD(&lp->list);
1961 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
1963 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
1965 vc->vcore_state = VCORE_PREEMPT;
1966 vc->pcpu = smp_processor_id();
1967 if (vc->num_threads < threads_per_subcore) {
1968 spin_lock(&lp->lock);
1969 list_add_tail(&vc->preempt_list, &lp->list);
1970 spin_unlock(&lp->lock);
1973 /* Start accumulating stolen time */
1974 kvmppc_core_start_stolen(vc);
1977 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
1979 struct preempted_vcore_list *lp;
1981 kvmppc_core_end_stolen(vc);
1982 if (!list_empty(&vc->preempt_list)) {
1983 lp = &per_cpu(preempted_vcores, vc->pcpu);
1984 spin_lock(&lp->lock);
1985 list_del_init(&vc->preempt_list);
1986 spin_unlock(&lp->lock);
1988 vc->vcore_state = VCORE_INACTIVE;
/*
 * This stores information about the virtual cores currently
 * assigned to a physical core.
 */
struct core_info {
	int		n_subcores;
	int		max_subcore_threads;
	int		total_threads;
	int		subcore_threads[MAX_SUBCORES];
	struct kvm	*subcore_vm[MAX_SUBCORES];
	struct list_head vcs[MAX_SUBCORES];
};
2005 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
2006 * respectively in 2-way micro-threading (split-core) mode.
2008 static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
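/*
 * In 4-way mode (two threads per subcore) the same table gives subcores
 * 0..3 first threads 0, 4, 2 and 6, i.e. thread pairs 0-1, 4-5, 2-3 and
 * 6-7 respectively (an inference from the table values; only the 2-way
 * case is described above).
 */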
2010 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
2014 memset(cip, 0, sizeof(*cip));
2015 cip->n_subcores = 1;
2016 cip->max_subcore_threads = vc->num_threads;
2017 cip->total_threads = vc->num_threads;
2018 cip->subcore_threads[0] = vc->num_threads;
2019 cip->subcore_vm[0] = vc->kvm;
2020 for (sub = 0; sub < MAX_SUBCORES; ++sub)
2021 INIT_LIST_HEAD(&cip->vcs[sub]);
2022 list_add_tail(&vc->preempt_list, &cip->vcs[0]);
2025 static bool subcore_config_ok(int n_subcores, int n_threads)
2027 /* Can only dynamically split if unsplit to begin with */
2028 if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
2030 if (n_subcores > MAX_SUBCORES)
2032 if (n_subcores > 1) {
2033 if (!(dynamic_mt_modes & 2))
2035 if (n_subcores > 2 && !(dynamic_mt_modes & 4))
2039 return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
2042 static void init_master_vcore(struct kvmppc_vcore *vc)
2044 vc->master_vcore = vc;
2045 vc->entry_exit_map = 0;
2047 vc->napping_threads = 0;
2048 vc->conferring_threads = 0;
2052 * See if the existing subcores can be split into 3 (or fewer) subcores
2053 * of at most two threads each, so we can fit in another vcore. This
2054 * assumes there are at most two subcores and at most 6 threads in total.
2056 static bool can_split_piggybacked_subcores(struct core_info *cip)
2061 int n_subcores = cip->n_subcores;
2062 struct kvmppc_vcore *vc, *vcnext;
2063 struct kvmppc_vcore *master_vc = NULL;
2065 for (sub = 0; sub < cip->n_subcores; ++sub) {
2066 if (cip->subcore_threads[sub] <= 2)
2071 vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
2073 if (vc->num_threads > 2)
2075 n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
2077 if (large_sub < 0 || !subcore_config_ok(n_subcores + 1, 2))
2081 * Seems feasible, so go through and move vcores to new subcores.
2082 * Note that when we have two or more vcores in one subcore,
2083 * all those vcores must have only one thread each.
2085 new_sub = cip->n_subcores;
2088 list_for_each_entry_safe(vc, vcnext, &cip->vcs[sub], preempt_list) {
2090 list_del(&vc->preempt_list);
2091 list_add_tail(&vc->preempt_list, &cip->vcs[new_sub]);
2092 /* vc->num_threads must be 1 */
2093 if (++cip->subcore_threads[new_sub] == 1) {
2094 cip->subcore_vm[new_sub] = vc->kvm;
2095 init_master_vcore(vc);
2099 vc->master_vcore = master_vc;
2103 thr += vc->num_threads;
2105 cip->subcore_threads[large_sub] = 2;
2106 cip->max_subcore_threads = 2;
2111 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
2113 int n_threads = vc->num_threads;
2116 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
2119 if (n_threads < cip->max_subcore_threads)
2120 n_threads = cip->max_subcore_threads;
2121 if (subcore_config_ok(cip->n_subcores + 1, n_threads)) {
2122 cip->max_subcore_threads = n_threads;
2123 } else if (cip->n_subcores <= 2 && cip->total_threads <= 6 &&
2124 vc->num_threads <= 2) {
2126 * We may be able to fit another subcore in by
2127 * splitting an existing subcore with 3 or 4
2128 * threads into two 2-thread subcores, or one
2129 * with 5 or 6 threads into three subcores.
2130 * We can only do this if those subcores have
2131 * piggybacked virtual cores.
2133 if (!can_split_piggybacked_subcores(cip))
2139 sub = cip->n_subcores;
2141 cip->total_threads += vc->num_threads;
2142 cip->subcore_threads[sub] = vc->num_threads;
2143 cip->subcore_vm[sub] = vc->kvm;
2144 init_master_vcore(vc);
2145 list_del(&vc->preempt_list);
2146 list_add_tail(&vc->preempt_list, &cip->vcs[sub]);
2151 static bool can_piggyback_subcore(struct kvmppc_vcore *pvc,
2152 struct core_info *cip, int sub)
2154 struct kvmppc_vcore *vc;
2157 vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
2160 /* require same VM and same per-core reg values */
2161 if (pvc->kvm != vc->kvm ||
2162 pvc->tb_offset != vc->tb_offset ||
2163 pvc->pcr != vc->pcr ||
2164 pvc->lpcr != vc->lpcr)
2167 /* P8 guest with > 1 thread per core would see wrong TIR value */
2168 if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
2169 (vc->num_threads > 1 || pvc->num_threads > 1))
2172 n_thr = cip->subcore_threads[sub] + pvc->num_threads;
2173 if (n_thr > cip->max_subcore_threads) {
2174 if (!subcore_config_ok(cip->n_subcores, n_thr))
2176 cip->max_subcore_threads = n_thr;
2179 cip->total_threads += pvc->num_threads;
2180 cip->subcore_threads[sub] = n_thr;
2181 pvc->master_vcore = vc;
2182 list_del(&pvc->preempt_list);
2183 list_add_tail(&pvc->preempt_list, &cip->vcs[sub]);
2189 * Work out whether it is possible to piggyback the execution of
2190 * vcore *pvc onto the execution of the other vcores described in *cip.
2192 static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
2197 if (cip->total_threads + pvc->num_threads > target_threads)
2199 for (sub = 0; sub < cip->n_subcores; ++sub)
2200 if (cip->subcore_threads[sub] &&
2201 can_piggyback_subcore(pvc, cip, sub))
2204 if (can_dynamic_split(pvc, cip))
2210 static void prepare_threads(struct kvmppc_vcore *vc)
2212 struct kvm_vcpu *vcpu, *vnext;
2214 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
2216 if (signal_pending(vcpu->arch.run_task))
2217 vcpu->arch.ret = -EINTR;
2218 else if (vcpu->arch.vpa.update_pending ||
2219 vcpu->arch.slb_shadow.update_pending ||
2220 vcpu->arch.dtl.update_pending)
2221 vcpu->arch.ret = RESUME_GUEST;
2224 kvmppc_remove_runnable(vc, vcpu);
2225 wake_up(&vcpu->arch.cpu_run);
2229 static void collect_piggybacks(struct core_info *cip, int target_threads)
2231 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2232 struct kvmppc_vcore *pvc, *vcnext;
2234 spin_lock(&lp->lock);
2235 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
2236 if (!spin_trylock(&pvc->lock))
2238 prepare_threads(pvc);
2239 if (!pvc->n_runnable) {
2240 list_del_init(&pvc->preempt_list);
2241 if (pvc->runner == NULL) {
2242 pvc->vcore_state = VCORE_INACTIVE;
2243 kvmppc_core_end_stolen(pvc);
2245 spin_unlock(&pvc->lock);
2248 if (!can_piggyback(pvc, cip, target_threads)) {
2249 spin_unlock(&pvc->lock);
2252 kvmppc_core_end_stolen(pvc);
2253 pvc->vcore_state = VCORE_PIGGYBACK;
2254 if (cip->total_threads >= target_threads)
2257 spin_unlock(&lp->lock);
2260 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
2262 int still_running = 0;
2265 struct kvm_vcpu *vcpu, *vnext;
2267 spin_lock(&vc->lock);
2269 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
2271 /* cancel pending dec exception if dec is positive */
2272 if (now < vcpu->arch.dec_expires &&
2273 kvmppc_core_pending_dec(vcpu))
2274 kvmppc_core_dequeue_dec(vcpu);
2276 trace_kvm_guest_exit(vcpu);
2279 if (vcpu->arch.trap)
2280 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
2281 vcpu->arch.run_task);
2283 vcpu->arch.ret = ret;
2284 vcpu->arch.trap = 0;
2286 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
2287 if (vcpu->arch.pending_exceptions)
2288 kvmppc_core_prepare_to_enter(vcpu);
2289 if (vcpu->arch.ceded)
2290 kvmppc_set_timer(vcpu);
2294 kvmppc_remove_runnable(vc, vcpu);
2295 wake_up(&vcpu->arch.cpu_run);
2298 list_del_init(&vc->preempt_list);
2300 if (still_running > 0) {
2301 kvmppc_vcore_preempt(vc);
2302 } else if (vc->runner) {
2303 vc->vcore_state = VCORE_PREEMPT;
2304 kvmppc_core_start_stolen(vc);
2306 vc->vcore_state = VCORE_INACTIVE;
2308 if (vc->n_runnable > 0 && vc->runner == NULL) {
2309 /* make sure there's a candidate runner awake */
2310 vcpu = list_first_entry(&vc->runnable_threads,
2311 struct kvm_vcpu, arch.run_list);
2312 wake_up(&vcpu->arch.cpu_run);
2315 spin_unlock(&vc->lock);
2319 * Clear core from the list of active host cores as we are about to
2320 * enter the guest. Only do this if it is the primary thread of the
2321 * core (not if a subcore) that is entering the guest.
2323 static inline void kvmppc_clear_host_core(int cpu)
2327 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
	/*
	 * Memory barrier can be omitted here as we will do a smp_wmb()
	 * later in kvmppc_start_thread and we need to ensure that state is
	 * visible to other CPUs only after we enter guest.
	 */
2334 core = cpu >> threads_shift;
2335 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
/*
 * Advertise this core as an active host core since we exited the guest.
 * Only need to do this if it is the primary thread of the core that is
 * exiting.
 */
2347 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
2351 * Memory barrier can be omitted here because we do a spin_unlock
2352 * immediately after this which provides the memory barrier.
2354 core = cpu >> threads_shift;
2355 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
2359 * Run a set of guest threads on a physical core.
2360 * Called with vc->lock held.
2362 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
2364 struct kvm_vcpu *vcpu, *vnext;
2367 struct core_info core_info;
2368 struct kvmppc_vcore *pvc, *vcnext;
2369 struct kvm_split_mode split_info, *sip;
2370 int split, subcore_size, active;
2373 unsigned long cmd_bit, stat_bit;
2378 * Remove from the list any threads that have a signal pending
2379 * or need a VPA update done
2381 prepare_threads(vc);
2383 /* if the runner is no longer runnable, let the caller pick a new one */
2384 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
2390 init_master_vcore(vc);
2391 vc->preempt_tb = TB_NIL;
2394 * Make sure we are running on primary threads, and that secondary
2395 * threads are offline. Also check that the number of threads in this
2396 * guest does not exceed the number of threads per subcore.
2398 if ((threads_per_core > 1) &&
2399 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
2400 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
2402 vcpu->arch.ret = -EBUSY;
2403 kvmppc_remove_runnable(vc, vcpu);
2404 wake_up(&vcpu->arch.cpu_run);
2410 * See if we could run any other vcores on the physical core
2411 * along with this one.
2413 init_core_info(&core_info, vc);
2414 pcpu = smp_processor_id();
2415 target_threads = threads_per_subcore;
2416 if (target_smt_mode && target_smt_mode < target_threads)
2417 target_threads = target_smt_mode;
2418 if (vc->num_threads < target_threads)
2419 collect_piggybacks(&core_info, target_threads);
2421 /* Decide on micro-threading (split-core) mode */
2422 subcore_size = threads_per_subcore;
2423 cmd_bit = stat_bit = 0;
2424 split = core_info.n_subcores;
2427 /* threads_per_subcore must be MAX_SMT_THREADS (8) here */
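/*
 * Use a 2-way split if two subcores suffice and 2-way mode is allowed;
 * otherwise fall back to a 4-way split.
 */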
2428 if (split == 2 && (dynamic_mt_modes & 2)) {
2429 cmd_bit = HID0_POWER8_1TO2LPAR;
2430 stat_bit = HID0_POWER8_2LPARMODE;
2433 cmd_bit = HID0_POWER8_1TO4LPAR;
2434 stat_bit = HID0_POWER8_4LPARMODE;
2436 subcore_size = MAX_SMT_THREADS / split;
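/*
 * Record the host's RPR, PMMAR and LDBAR values, the subcore size and
 * each subcore's master vcore in split_info for use while the core is split.
 */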
2438 memset(&split_info, 0, sizeof(split_info));
2439 split_info.rpr = mfspr(SPRN_RPR);
2440 split_info.pmmar = mfspr(SPRN_PMMAR);
2441 split_info.ldbar = mfspr(SPRN_LDBAR);
2442 split_info.subcore_size = subcore_size;
2443 for (sub = 0; sub < core_info.n_subcores; ++sub)
2444 split_info.master_vcs[sub] =
2445 list_first_entry(&core_info.vcs[sub],
2446 struct kvmppc_vcore, preempt_list);
2447 /* order writes to split_info before kvm_split_mode pointer */
2450 pcpu = smp_processor_id();
2451 for (thr = 0; thr < threads_per_subcore; ++thr)
2452 paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
2454 /* Initiate micro-threading (split-core) if required */
2456 unsigned long hid0 = mfspr(SPRN_HID0);
2458 hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
2460 mtspr(SPRN_HID0, hid0);
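/* Poll HID0 until the hardware reports the requested split has taken effect */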
2463 hid0 = mfspr(SPRN_HID0);
2464 if (hid0 & stat_bit)
2470 kvmppc_clear_host_core(pcpu);
2472 /* Start all the threads */
2474 for (sub = 0; sub < core_info.n_subcores; ++sub) {
2475 thr = subcore_thread_map[sub];
2478 list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
2479 pvc->pcpu = pcpu + thr;
2480 list_for_each_entry(vcpu, &pvc->runnable_threads,
2482 kvmppc_start_thread(vcpu, pvc);
2483 kvmppc_create_dtl_entry(vcpu, pvc);
2484 trace_kvm_guest_enter(vcpu);
2485 if (!vcpu->arch.ptid)
2487 active |= 1 << (thr + vcpu->arch.ptid);
2490 * We need to start the first thread of each subcore
2491 * even if it doesn't have a vcpu.
2493 if (pvc->master_vcore == pvc && !thr0_done)
2494 kvmppc_start_thread(NULL, pvc);
2495 thr += pvc->num_threads;
2500 * Ensure that split_info.do_nap is set after setting
2501 * the vcore pointer in the PACA of the secondaries.
2505 split_info.do_nap = 1; /* ask secondaries to nap when done */
2508 * When doing micro-threading, poke the inactive threads as well.
2509 * This gets them to the nap instruction after kvm_do_nap,
2510 * which reduces the time taken to unsplit later.
2513 for (thr = 1; thr < threads_per_subcore; ++thr)
2514 if (!(active & (1 << thr)))
2515 kvmppc_ipi_thread(pcpu + thr);
2517 vc->vcore_state = VCORE_RUNNING;
2520 trace_kvmppc_run_core(vc, 0);
2522 for (sub = 0; sub < core_info.n_subcores; ++sub)
2523 list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
2524 spin_unlock(&pvc->lock);
2528 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
2530 __kvmppc_vcore_entry();
2532 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
2534 spin_lock(&vc->lock);
2535 /* prevent other vcpu threads from doing kvmppc_start_thread() now */
2536 vc->vcore_state = VCORE_EXITING;
2538 /* wait for secondary threads to finish writing their state to memory */
2539 kvmppc_wait_for_nap();
2541 /* Return to whole-core mode if we split the core earlier */
2543 unsigned long hid0 = mfspr(SPRN_HID0);
2544 unsigned long loops = 0;
2546 hid0 &= ~HID0_POWER8_DYNLPARDIS;
2547 stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
2549 mtspr(SPRN_HID0, hid0);
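/* Poll HID0 until both LPAR-mode status bits clear, i.e. the core is unsplit */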
2552 hid0 = mfspr(SPRN_HID0);
2553 if (!(hid0 & stat_bit))
2558 split_info.do_nap = 0;
2561 /* Let secondaries go back to the offline loop */
2562 for (i = 0; i < threads_per_subcore; ++i) {
2563 kvmppc_release_hwthread(pcpu + i);
2564 if (sip && sip->napped[i])
2565 kvmppc_ipi_thread(pcpu + i);
2568 kvmppc_set_host_core(pcpu);
2570 spin_unlock(&vc->lock);
2572 /* make sure updates to secondary vcpu structs are visible now */
2576 for (sub = 0; sub < core_info.n_subcores; ++sub)
2577 list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
2579 post_guest_process(pvc, pvc == vc);
2581 spin_lock(&vc->lock);
2585 vc->vcore_state = VCORE_INACTIVE;
2586 trace_kvmppc_run_core(vc, 1);
2590 * Wait for some other vcpu thread to execute us, and
2591 * wake us up when we need to handle something in the host.
2593 static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
2594 struct kvm_vcpu *vcpu, int wait_state)
2598 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
2599 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2600 spin_unlock(&vc->lock);
2602 spin_lock(&vc->lock);
2604 finish_wait(&vcpu->arch.cpu_run, &wait);
2608 * All the vcpus in this vcore are idle, so wait for a decrementer
2609 * or external interrupt to one of the vcpus. vc->lock is held.
2611 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
2613 struct kvm_vcpu *vcpu;
2615 DECLARE_SWAITQUEUE(wait);
2617 prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
2620 * Check one last time for pending exceptions and ceded state after
2621 * we put ourselves on the wait queue
2623 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
2624 if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
2631 finish_swait(&vc->wq, &wait);
2635 vc->vcore_state = VCORE_SLEEPING;
2636 trace_kvmppc_vcore_blocked(vc, 0);
2637 spin_unlock(&vc->lock);
2639 finish_swait(&vc->wq, &wait);
2640 spin_lock(&vc->lock);
2641 vc->vcore_state = VCORE_INACTIVE;
2642 trace_kvmppc_vcore_blocked(vc, 1);
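/*
 * Add this vcpu to its virtual core's runnable list and keep running the
 * core (or waiting for some other task to run it) until the vcpu needs
 * attention from the host or a signal is pending.
 */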
2645 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2648 struct kvmppc_vcore *vc;
2649 struct kvm_vcpu *v, *vn;
2651 trace_kvmppc_run_vcpu_enter(vcpu);
2653 kvm_run->exit_reason = 0;
2654 vcpu->arch.ret = RESUME_GUEST;
2655 vcpu->arch.trap = 0;
2656 kvmppc_update_vpas(vcpu);
2659 * Synchronize with other threads in this virtual core
2661 vc = vcpu->arch.vcore;
2662 spin_lock(&vc->lock);
2663 vcpu->arch.ceded = 0;
2664 vcpu->arch.run_task = current;
2665 vcpu->arch.kvm_run = kvm_run;
2666 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
2667 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
2668 vcpu->arch.busy_preempt = TB_NIL;
2669 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
2673 * This happens the first time this is called for a vcpu.
2674 * If the vcore is already running, we may be able to start
2675 * this thread straight away and have it join in.
2677 if (!signal_pending(current)) {
2678 if (vc->vcore_state == VCORE_PIGGYBACK) {
2679 struct kvmppc_vcore *mvc = vc->master_vcore;
2680 if (spin_trylock(&mvc->lock)) {
2681 if (mvc->vcore_state == VCORE_RUNNING &&
2682 !VCORE_IS_EXITING(mvc)) {
2683 kvmppc_create_dtl_entry(vcpu, vc);
2684 kvmppc_start_thread(vcpu, vc);
2685 trace_kvm_guest_enter(vcpu);
2687 spin_unlock(&mvc->lock);
2689 } else if (vc->vcore_state == VCORE_RUNNING &&
2690 !VCORE_IS_EXITING(vc)) {
2691 kvmppc_create_dtl_entry(vcpu, vc);
2692 kvmppc_start_thread(vcpu, vc);
2693 trace_kvm_guest_enter(vcpu);
2694 } else if (vc->vcore_state == VCORE_SLEEPING) {
2700 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
2701 !signal_pending(current)) {
2702 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
2703 kvmppc_vcore_end_preempt(vc);
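/* Some other task is running this vcore; wait for it to finish with the core */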
2705 if (vc->vcore_state != VCORE_INACTIVE) {
2706 kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
2709 list_for_each_entry_safe(v, vn, &vc->runnable_threads,
2711 kvmppc_core_prepare_to_enter(v);
2712 if (signal_pending(v->arch.run_task)) {
2713 kvmppc_remove_runnable(vc, v);
2714 v->stat.signal_exits++;
2715 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
2716 v->arch.ret = -EINTR;
2717 wake_up(&v->arch.cpu_run);
2720 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2723 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
2724 if (!v->arch.pending_exceptions)
2725 n_ceded += v->arch.ceded;
2730 if (n_ceded == vc->n_runnable) {
2731 kvmppc_vcore_blocked(vc);
2732 } else if (need_resched()) {
2733 kvmppc_vcore_preempt(vc);
2734 /* Let something else run */
2735 cond_resched_lock(&vc->lock);
2736 if (vc->vcore_state == VCORE_PREEMPT)
2737 kvmppc_vcore_end_preempt(vc);
2739 kvmppc_run_core(vc);
2744 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
2745 (vc->vcore_state == VCORE_RUNNING ||
2746 vc->vcore_state == VCORE_EXITING ||
2747 vc->vcore_state == VCORE_PIGGYBACK))
2748 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
2750 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
2751 kvmppc_vcore_end_preempt(vc);
2753 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2754 kvmppc_remove_runnable(vc, vcpu);
2755 vcpu->stat.signal_exits++;
2756 kvm_run->exit_reason = KVM_EXIT_INTR;
2757 vcpu->arch.ret = -EINTR;
2760 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
2761 /* Wake up some vcpu to run the core */
2762 v = list_first_entry(&vc->runnable_threads,
2763 struct kvm_vcpu, arch.run_list);
2764 wake_up(&v->arch.cpu_run);
2767 trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
2768 spin_unlock(&vc->lock);
2769 return vcpu->arch.ret;
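/*
 * Top-level vcpu-run entry point for HV guests: set up the HPT and VRMA on
 * first use, run the vcpu, and service guest hypercalls and HPT page faults
 * in the host until the exit has to be delivered to userspace.
 */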
2772 static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
2777 if (!vcpu->arch.sane) {
2778 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2782 kvmppc_core_prepare_to_enter(vcpu);
2784 /* No need to go into the guest when all we'll do is come back out */
2785 if (signal_pending(current)) {
2786 run->exit_reason = KVM_EXIT_INTR;
2790 atomic_inc(&vcpu->kvm->arch.vcpus_running);
2791 /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
2794 /* On the first time here, set up HTAB and VRMA */
2795 if (!vcpu->kvm->arch.hpte_setup_done) {
2796 r = kvmppc_hv_setup_htab_rma(vcpu);
2801 flush_all_to_thread(current);
2803 vcpu->arch.wqp = &vcpu->arch.vcore->wq;
2804 vcpu->arch.pgdir = current->mm->pgd;
2805 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2808 r = kvmppc_run_vcpu(run, vcpu);
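/*
 * Handle hypercalls made by the guest kernel and guest page faults on the
 * hashed page table here in the host, then loop while the guest can be resumed.
 */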
2810 if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
2811 !(vcpu->arch.shregs.msr & MSR_PR)) {
2812 trace_kvm_hcall_enter(vcpu);
2813 r = kvmppc_pseries_do_hcall(vcpu);
2814 trace_kvm_hcall_exit(vcpu, r);
2815 kvmppc_core_prepare_to_enter(vcpu);
2816 } else if (r == RESUME_PAGE_FAULT) {
2817 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2818 r = kvmppc_book3s_hv_page_fault(run, vcpu,
2819 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
2820 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2822 } while (is_kvmppc_resume_guest(r));
2825 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2826 atomic_dec(&vcpu->kvm->arch.vcpus_running);
2830 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
2833 struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
2837 (*sps)->page_shift = def->shift;
2838 (*sps)->slb_enc = def->sllp;
2839 (*sps)->enc[0].page_shift = def->shift;
2840 (*sps)->enc[0].pte_enc = def->penc[linux_psize];
2842 * Add 16MB MPSS support if host supports it
2844 if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) {
2845 (*sps)->enc[1].page_shift = 24;
2846 (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M];
2851 static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
2852 struct kvm_ppc_smmu_info *info)
2854 struct kvm_ppc_one_seg_page_size *sps;
2856 info->flags = KVM_PPC_PAGE_SIZES_REAL;
2857 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
2858 info->flags |= KVM_PPC_1T_SEGMENTS;
2859 info->slb_size = mmu_slb_size;
2861 /* We only support these sizes for now, and no multi-size segments */
2862 sps = &info->sps[0];
2863 kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
2864 kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
2865 kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
2871 * Get (and clear) the dirty memory log for a memory slot.
2873 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
2874 struct kvm_dirty_log *log)
2876 struct kvm_memslots *slots;
2877 struct kvm_memory_slot *memslot;
2881 mutex_lock(&kvm->slots_lock);
2884 if (log->slot >= KVM_USER_MEM_SLOTS)
2887 slots = kvm_memslots(kvm);
2888 memslot = id_to_memslot(slots, log->slot);
2890 if (!memslot->dirty_bitmap)
2893 n = kvm_dirty_bitmap_bytes(memslot);
2894 memset(memslot->dirty_bitmap, 0, n);
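/*
 * Collect this memslot's dirty bits into the freshly cleared bitmap
 * before copying it out to userspace.
 */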
2896 r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
2901 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
2906 mutex_unlock(&kvm->slots_lock);
2910 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
2911 struct kvm_memory_slot *dont)
2913 if (!dont || free->arch.rmap != dont->arch.rmap) {
2914 vfree(free->arch.rmap);
2915 free->arch.rmap = NULL;
2919 static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
2920 unsigned long npages)
2922 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
2923 if (!slot->arch.rmap)
2929 static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
2930 struct kvm_memory_slot *memslot,
2931 const struct kvm_userspace_memory_region *mem)
2936 static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
2937 const struct kvm_userspace_memory_region *mem,
2938 const struct kvm_memory_slot *old,
2939 const struct kvm_memory_slot *new)
2941 unsigned long npages = mem->memory_size >> PAGE_SHIFT;
2942 struct kvm_memslots *slots;
2943 struct kvm_memory_slot *memslot;
2945 if (npages && old->npages) {
2947 * If modifying a memslot, reset all the rmap dirty bits.
2948 * If this is a new memslot, we don't need to do anything
2949 * since the rmap array starts out as all zeroes,
2950 * i.e. no pages are dirty.
2952 slots = kvm_memslots(kvm);
2953 memslot = id_to_memslot(slots, mem->slot);
2954 kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
2959 * Update LPCR values in kvm->arch and in vcores.
2960 * Caller must hold kvm->lock.
2962 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
2967 if ((kvm->arch.lpcr & mask) == lpcr)
2970 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
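/* Propagate the new value to each existing vcore's cached copy of the LPCR */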
2972 for (i = 0; i < KVM_MAX_VCORES; ++i) {
2973 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
2976 spin_lock(&vc->lock);
2977 vc->lpcr = (vc->lpcr & ~mask) | lpcr;
2978 spin_unlock(&vc->lock);
2979 if (++cores_done >= kvm->arch.online_vcores)
2984 static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
2989 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
2992 struct kvm *kvm = vcpu->kvm;
2994 struct kvm_memory_slot *memslot;
2995 struct vm_area_struct *vma;
2996 unsigned long lpcr = 0, senc;
2997 unsigned long psize, porder;
3000 mutex_lock(&kvm->lock);
3001 if (kvm->arch.hpte_setup_done)
3002 goto out; /* another vcpu beat us to it */
3004 /* Allocate hashed page table (if not done already) and reset it */
3005 if (!kvm->arch.hpt_virt) {
3006 err = kvmppc_alloc_hpt(kvm, NULL);
3008 pr_err("KVM: Couldn't alloc HPT\n");
3013 /* Look up the memslot for guest physical address 0 */
3014 srcu_idx = srcu_read_lock(&kvm->srcu);
3015 memslot = gfn_to_memslot(kvm, 0);
3017 /* We must have some memory at 0 by now */
3019 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
3022 /* Look up the VMA for the start of this memory slot */
3023 hva = memslot->userspace_addr;
3024 down_read(&current->mm->mmap_sem);
3025 vma = find_vma(current->mm, hva);
3026 if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
3029 psize = vma_kernel_pagesize(vma);
3030 porder = __ilog2(psize);
3032 up_read(&current->mm->mmap_sem);
3034 /* We can handle 4k, 64k or 16M pages in the VRMA */
3036 if (!(psize == 0x1000 || psize == 0x10000 ||
3037 psize == 0x1000000))
3040 /* Update VRMASD field in the LPCR */
3041 senc = slb_pgsize_encoding(psize);
3042 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
3043 (VRMA_VSID << SLB_VSID_SHIFT_1T);
3044 /* the -4 is to account for senc values starting at 0x10 */
3045 lpcr = senc << (LPCR_VRMASD_SH - 4);
3047 /* Create HPTEs in the hash page table for the VRMA */
3048 kvmppc_map_vrma(vcpu, memslot, porder);
3050 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
3052 /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
3054 kvm->arch.hpte_setup_done = 1;
3057 srcu_read_unlock(&kvm->srcu, srcu_idx);
3059 mutex_unlock(&kvm->lock);
3063 up_read(&current->mm->mmap_sem);
3067 #ifdef CONFIG_KVM_XICS
3068 static int kvmppc_cpu_notify(struct notifier_block *self, unsigned long action,
3071 unsigned long cpu = (long)hcpu;
3074 case CPU_UP_PREPARE:
3075 case CPU_UP_PREPARE_FROZEN:
3076 kvmppc_set_host_core(cpu);
3079 #ifdef CONFIG_HOTPLUG_CPU
3081 case CPU_DEAD_FROZEN:
3082 case CPU_UP_CANCELED:
3083 case CPU_UP_CANCELED_FROZEN:
3084 kvmppc_clear_host_core(cpu);
3094 static struct notifier_block kvmppc_cpu_notifier = {
3095 .notifier_call = kvmppc_cpu_notify,
3099 * Allocate a per-core structure for managing state about which cores are
3100 * running in the host versus the guest and for exchanging data between
3101 * real-mode KVM and CPUs running in the host.
3102 * This is only done for the first VM.
3103 * The allocated structure stays around even if all VMs have stopped.
3104 * It is only freed when the kvm-hv module is unloaded.
3105 * It's OK for this routine to fail; we just won't support host
3106 * core operations like redirecting H_IPI wakeups.
3108 void kvmppc_alloc_host_rm_ops(void)
3110 struct kvmppc_host_rm_ops *ops;
3111 unsigned long l_ops;
3115 /* Not the first time here? */
3116 if (kvmppc_host_rm_ops_hv != NULL)
3119 ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
3123 size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
3124 ops->rm_core = kzalloc(size, GFP_KERNEL);
3126 if (!ops->rm_core) {
3133 for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
3134 if (!cpu_online(cpu))
3137 core = cpu >> threads_shift;
3138 ops->rm_core[core].rm_state.in_host = 1;
3141 ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;
3144 * Make the contents of the kvmppc_host_rm_ops structure visible
3145 * to other CPUs before we assign it to the global variable.
3146 * Do an atomic assignment (no locks used here), but if someone
3147 * beats us to it, just free our copy and return.
3150 l_ops = (unsigned long) ops;
3152 if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
3154 kfree(ops->rm_core);
3159 register_cpu_notifier(&kvmppc_cpu_notifier);
3164 void kvmppc_free_host_rm_ops(void)
3166 if (kvmppc_host_rm_ops_hv) {
3167 unregister_cpu_notifier(&kvmppc_cpu_notifier);
3168 kfree(kvmppc_host_rm_ops_hv->rm_core);
3169 kfree(kvmppc_host_rm_ops_hv);
3170 kvmppc_host_rm_ops_hv = NULL;
3175 static int kvmppc_core_init_vm_hv(struct kvm *kvm)
3177 unsigned long lpcr, lpid;
3180 /* Allocate the guest's logical partition ID */
3182 lpid = kvmppc_alloc_lpid();
3185 kvm->arch.lpid = lpid;
3187 kvmppc_alloc_host_rm_ops();
3190 * Since we don't flush the TLB when tearing down a VM,
3191 * and this lpid might have previously been used,
3192 * make sure we flush on each core before running the new VM.
3194 cpumask_setall(&kvm->arch.need_tlb_flush);
3196 /* Start out with the default set of hcalls enabled */
3197 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
3198 sizeof(kvm->arch.enabled_hcalls));
3200 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
3202 /* Init LPCR for virtual RMA mode */
3203 kvm->arch.host_lpid = mfspr(SPRN_LPID);
3204 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
3205 lpcr &= LPCR_PECE | LPCR_LPES;
3206 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
3207 LPCR_VPM0 | LPCR_VPM1;
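/*
 * Default VRMA SLB encoding (1T segment); kvmppc_hv_setup_htab_rma() will
 * recompute this once the guest's real-mode area page size is known.
 */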
3208 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
3209 (VRMA_VSID << SLB_VSID_SHIFT_1T);
3210 /* On POWER8 turn on online bit to enable PURR/SPURR */
3211 if (cpu_has_feature(CPU_FTR_ARCH_207S))
3213 kvm->arch.lpcr = lpcr;
3216 * Track that we now have a HV mode VM active. This blocks secondary
3217 * CPU threads from coming online.
3219 kvm_hv_vm_activated();
3222 * Create a debugfs directory for the VM
3224 snprintf(buf, sizeof(buf), "vm%d", current->pid);
3225 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
3226 if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
3227 kvmppc_mmu_debugfs_init(kvm);
3232 static void kvmppc_free_vcores(struct kvm *kvm)
3236 for (i = 0; i < KVM_MAX_VCORES; ++i)
3237 kfree(kvm->arch.vcores[i]);
3238 kvm->arch.online_vcores = 0;
3241 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
3243 debugfs_remove_recursive(kvm->arch.debugfs_dir);
3245 kvm_hv_vm_deactivated();
3247 kvmppc_free_vcores(kvm);
3249 kvmppc_free_hpt(kvm);
3252 /* We don't need to emulate any privileged instructions or dcbz */
3253 static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3254 unsigned int inst, int *advance)
3256 return EMULATE_FAIL;
3259 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
3262 return EMULATE_FAIL;
3265 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
3268 return EMULATE_FAIL;
3271 static int kvmppc_core_check_processor_compat_hv(void)
3273 if (!cpu_has_feature(CPU_FTR_HVMODE) ||
3274 !cpu_has_feature(CPU_FTR_ARCH_206))
3277 * Disable KVM for POWER9 until the required bits are merged.
3279 if (cpu_has_feature(CPU_FTR_ARCH_300))
3285 static long kvm_arch_vm_ioctl_hv(struct file *filp,
3286 unsigned int ioctl, unsigned long arg)
3288 struct kvm *kvm __maybe_unused = filp->private_data;
3289 void __user *argp = (void __user *)arg;
3294 case KVM_PPC_ALLOCATE_HTAB: {
3298 if (get_user(htab_order, (u32 __user *)argp))
3300 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
3304 if (put_user(htab_order, (u32 __user *)argp))
3310 case KVM_PPC_GET_HTAB_FD: {
3311 struct kvm_get_htab_fd ghf;
3314 if (copy_from_user(&ghf, argp, sizeof(ghf)))
3316 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
3328 * List of hcall numbers to enable by default.
3329 * For compatibility with old userspace, we enable by default
3330 * all hcalls that were implemented before the hcall-enabling
3331 * facility was added. Note this list should not include H_RTAS.
3333 static unsigned int default_hcall_list[] = {
3347 #ifdef CONFIG_KVM_XICS
3358 static void init_default_hcalls(void)
3363 for (i = 0; default_hcall_list[i]; ++i) {
3364 hcall = default_hcall_list[i];
3365 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
3366 __set_bit(hcall / 4, default_enabled_hcalls);
3370 static struct kvmppc_ops kvm_ops_hv = {
3371 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
3372 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
3373 .get_one_reg = kvmppc_get_one_reg_hv,
3374 .set_one_reg = kvmppc_set_one_reg_hv,
3375 .vcpu_load = kvmppc_core_vcpu_load_hv,
3376 .vcpu_put = kvmppc_core_vcpu_put_hv,
3377 .set_msr = kvmppc_set_msr_hv,
3378 .vcpu_run = kvmppc_vcpu_run_hv,
3379 .vcpu_create = kvmppc_core_vcpu_create_hv,
3380 .vcpu_free = kvmppc_core_vcpu_free_hv,
3381 .check_requests = kvmppc_core_check_requests_hv,
3382 .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
3383 .flush_memslot = kvmppc_core_flush_memslot_hv,
3384 .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
3385 .commit_memory_region = kvmppc_core_commit_memory_region_hv,
3386 .unmap_hva = kvm_unmap_hva_hv,
3387 .unmap_hva_range = kvm_unmap_hva_range_hv,
3388 .age_hva = kvm_age_hva_hv,
3389 .test_age_hva = kvm_test_age_hva_hv,
3390 .set_spte_hva = kvm_set_spte_hva_hv,
3391 .mmu_destroy = kvmppc_mmu_destroy_hv,
3392 .free_memslot = kvmppc_core_free_memslot_hv,
3393 .create_memslot = kvmppc_core_create_memslot_hv,
3394 .init_vm = kvmppc_core_init_vm_hv,
3395 .destroy_vm = kvmppc_core_destroy_vm_hv,
3396 .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
3397 .emulate_op = kvmppc_core_emulate_op_hv,
3398 .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
3399 .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
3400 .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
3401 .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
3402 .hcall_implemented = kvmppc_hcall_impl_hv,
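/*
 * Allocate one sibling_subcore_state structure per physical core and point
 * each hardware thread's paca at it, so the threads of a core share state.
 */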
3405 static int kvm_init_subcore_bitmap(void)
3408 int nr_cores = cpu_nr_cores();
3409 struct sibling_subcore_state *sibling_subcore_state;
3411 for (i = 0; i < nr_cores; i++) {
3412 int first_cpu = i * threads_per_core;
3413 int node = cpu_to_node(first_cpu);
3415 /* Ignore if it is already allocated. */
3416 if (paca[first_cpu].sibling_subcore_state)
3419 sibling_subcore_state =
3420 kmalloc_node(sizeof(struct sibling_subcore_state),
3422 if (!sibling_subcore_state)
3425 memset(sibling_subcore_state, 0,
3426 sizeof(struct sibling_subcore_state));
3428 for (j = 0; j < threads_per_core; j++) {
3429 int cpu = first_cpu + j;
3431 paca[cpu].sibling_subcore_state = sibling_subcore_state;
3437 static int kvmppc_book3s_init_hv(void)
3441 * FIXME!! Do we need to check on all CPUs?
3443 r = kvmppc_core_check_processor_compat_hv();
3447 r = kvm_init_subcore_bitmap();
3451 kvm_ops_hv.owner = THIS_MODULE;
3452 kvmppc_hv_ops = &kvm_ops_hv;
3454 init_default_hcalls();
3458 r = kvmppc_mmu_hv_init();
3462 static void kvmppc_book3s_exit_hv(void)
3464 kvmppc_free_host_rm_ops();
3465 kvmppc_hv_ops = NULL;
3468 module_init(kvmppc_book3s_init_hv);
3469 module_exit(kvmppc_book3s_exit_hv);
3470 MODULE_LICENSE("GPL");
3471 MODULE_ALIAS_MISCDEV(KVM_MINOR);
3472 MODULE_ALIAS("devname:kvm");