// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads.
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
                       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * The hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        if (!p)
                return -EINVAL;
        return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

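/*
 * For example, booting with "kvm_cma_resv_ratio=10" on the kernel command
 * line reserves 10% of system memory for the HPT CMA area instead of the
 * default 5%.
 */
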
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
                         false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
        cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

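/*
 * Illustrative use (the actual caller is the HPT allocator in
 * book3s_64_mmu_hv.c): to back a 2^order-byte hash page table,
 *
 *	struct page *page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
 *	...
 *	kvm_free_hpt_cma(page, 1ul << (order - PAGE_SHIFT));
 */
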
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
        unsigned long align_size;
        phys_addr_t selected_size;

        /*
         * We need CMA reservation only when we are in HV mode
         */
        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return;

        selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
        if (selected_size) {
                pr_info("%s: reserving %ld MiB for global area\n", __func__,
                        (unsigned long)selected_size / SZ_1M);
                align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                cma_declare_contiguous(0, selected_size, 0, align_size,
                        KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
                        &kvm_cma);
        }
}

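/*
 * Worked example: on a host with 64 GiB of memory and the default 5%
 * ratio, selected_size comes out at 3.2 GiB; the area is aligned to
 * align_size (HPT_ALIGN_PAGES << PAGE_SHIFT = 256 KiB) and managed in
 * 2^KVM_CMA_CHUNK_ORDER = 256 KiB granules.
 */
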
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        int threads_running;
        int threads_ceded;
        int threads_conferring;
        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
        int rv = H_SUCCESS; /* => don't yield */

        set_bit(ptid, &vc->conferring_threads);
        while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
                threads_running = VCORE_ENTRY_MAP(vc);
                threads_ceded = vc->napping_threads;
                threads_conferring = vc->conferring_threads;
                if ((threads_ceded | threads_conferring) == threads_running) {
                        rv = H_TOO_HARD; /* => do yield */
                        break;
                }
        }
        clear_bit(ptid, &vc->conferring_threads);
        return rv;
}

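/*
 * Example of the bitmap check above: with four threads in the guest
 * (VCORE_ENTRY_MAP == 0x0f), napping_threads == 0x09 and
 * conferring_threads == 0x06 gives 0x09 | 0x06 == 0x0f, i.e. every
 * running thread has either ceded or is conferring, so we return
 * H_TOO_HARD and yield to the virtual-mode handler.
 */
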
/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
        get_online_cpus();
        atomic_inc(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
        get_online_cpus();
        atomic_dec(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
        return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
        cmd /= 4;
        if (cmd < hcall_real_table_end - hcall_real_table &&
            hcall_real_table[cmd])
                return 1;

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

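/*
 * PAPR hcall numbers are multiples of 4, so "cmd /= 4" above turns the
 * hcall number into a table index; e.g. H_CONFER (0x124) maps to entry
 * 0x49 of hcall_real_table.
 */
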
int kvmppc_hwrng_present(void)
{
        return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
        int r;

        /* Only need to do the expensive mfmsr() on radix */
        if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
                r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
        else
                r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
        if (r)
                return H_SUCCESS;

        return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
        void __iomem *xics_phys;
        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

        /* For a nested hypervisor, use the XICS via hcall */
        if (kvmhv_on_pseries()) {
                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
                                IPI_PRIORITY);
                return;
        }

        /* On POWER9 we can use msgsnd for any destination cpu. */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                msg |= get_hard_smp_processor_id(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* On POWER8 for IPIs to threads in the same core, use msgsnd. */
        if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
            cpu_first_thread_sibling(cpu) ==
            cpu_first_thread_sibling(raw_smp_processor_id())) {
                msg |= cpu_thread_in_core(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* We should never reach this */
        if (WARN_ON_ONCE(xics_on_xive()))
                return;

        /* Else poke the target with an IPI */
        xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
        if (xics_phys)
                __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
        else
                opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
        int cpu = vc->pcpu;

        /* Order setting of exit map vs. msgsnd/IPI */
        smp_mb();
        for (; active; active >>= 1, ++cpu)
                if (active & 1)
                        kvmhv_rm_send_ipi(cpu);
}

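/*
 * Example: active == 0x6 with vc->pcpu == 32 walks bits 1 and 2 of the
 * mask and sends IPIs to cpus 33 and 34.
 */
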
void kvmhv_commence_exit(int trap)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
        int me, ee, i, t;
        int cpu0;

        /* Set our bit in the threads-exiting-guest map in the 0xff00
           bits of vcore->entry_exit_map */
        me = 0x100 << ptid;
        do {
                ee = vc->entry_exit_map;
        } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

        /* Are we the first here? */
        if ((ee >> 8) != 0)
                return;

        /*
         * Trigger the other threads in this vcore to exit the guest.
         * If this is a hypervisor decrementer interrupt then they
         * will be already on their way out of the guest.
         */
        if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
                kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

        /*
         * If we are doing dynamic micro-threading, interrupt the other
         * subcores to pull them out of their guests too.
         */
        if (!sip)
                return;

        for (i = 0; i < MAX_SUBCORES; ++i) {
                vc = sip->vc[i];
                if (!vc)
                        break;
                do {
                        ee = vc->entry_exit_map;
                        /* Already asked to exit? */
                        if ((ee >> 8) != 0)
                                break;
                } while (cmpxchg(&vc->entry_exit_map, ee,
                                 ee | VCORE_EXIT_REQ) != ee);
                if ((ee >> 8) == 0)
                        kvmhv_interrupt_vcore(vc, ee);
        }

        /*
         * On POWER9 when running a HPT guest on a radix host (sip != NULL),
         * we have to interrupt inactive CPU threads to get them to
         * restore the host LPCR value.
         */
        if (sip->lpcr_req) {
                if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
                        vc = local_paca->kvm_hstate.kvm_vcore;
                        cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
                        for (t = 1; t < threads_per_core; ++t) {
                                if (sip->napped[t])
                                        kvmhv_rm_send_ipi(cpu0 + t);
                        }
                }
        }
}

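/*
 * Layout note: entry_exit_map packs two byte-wide bitmaps; the low byte
 * (bits 0-7) tracks threads that have entered the guest, and the 0xff00
 * byte tracks threads that have signalled an exit. E.g. thread 2 marks
 * itself as exiting with me = 0x100 << 2 = 0x400.
 */
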
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
                                         u32 xisr)
{
        int i;

        /*
         * We access the mapped array here without a lock. That
         * is safe because we never reduce the number of entries
         * in the array and we never change the v_hwirq field of
         * an entry once it is set.
         *
         * We have also carefully ordered the stores in the writer
         * and the loads here in the reader, so that if we find a matching
         * hwirq here, the associated GSI and irq_desc fields are valid.
         */
        for (i = 0; i < pimap->n_mapped; i++) {
                if (xisr == pimap->mapped[i].r_hwirq) {
                        /*
                         * Order subsequent reads in the caller to serialize
                         * with the writer.
                         */
                        smp_rmb();
                        return &pimap->mapped[i];
                }
        }
        return NULL;
}

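/*
 * The smp_rmb() above pairs with the store ordering on the update side
 * (kvmppc_set_passthru_irq() in book3s_hv.c), which publishes r_hwirq
 * only after the other fields of the entry have been written.
 */
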
/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        struct kvmppc_passthru_irqmap *pimap;
        struct kvmppc_irq_map *irq_map;
        struct kvm_vcpu *vcpu;

        vcpu = local_paca->kvm_hstate.kvm_vcpu;
        if (!vcpu)
                return 1;
        pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
        if (!pimap)
                return 1;
        irq_map = get_irqmap(pimap, xisr);
        if (!irq_map)
                return 1;

        /* We're handling this interrupt, generic code doesn't need to */
        local_paca->kvm_hstate.saved_xirr = 0;

        return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
        long ret = 0;
        long rc;
        bool again;

        if (xive_enabled())
                return 1;

        do {
                again = false;
                rc = kvmppc_read_one_intr(&again);
                if (rc && (ret == 0 || rc > ret))
                        ret = rc;
        } while (again);
        return ret;
}

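/*
 * The loop above folds the per-interrupt results into one return value:
 * any nonzero code replaces 0, and a larger code wins, so e.g. a consumed
 * guest wakeup IPI (-1) followed by a host interrupt (1) reports 1 to the
 * caller.
 */
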
static long kvmppc_read_one_intr(bool *again)
{
        void __iomem *xics_phys;
        u32 h_xirr;
        __be32 xirr;
        u32 xisr;
        u8 host_ipi;
        int64_t rc;

        if (xive_enabled())
                return 1;

        /* see if a host IPI is pending */
        host_ipi = local_paca->kvm_hstate.host_ipi;
        if (host_ipi)
                return 1;

        /* Now read the interrupt from the ICP */
        if (kvmhv_on_pseries()) {
                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
                xirr = cpu_to_be32(retbuf[0]);
        } else {
                xics_phys = local_paca->kvm_hstate.xics_phys;
                rc = 0;
                if (!xics_phys)
                        rc = opal_int_get_xirr(&xirr, false);
                else
                        xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
        }
        if (rc < 0)
                return 1;

        /*
         * Save XIRR for later. Since we get control in reverse endian
         * on LE systems, save it byte reversed and fetch it back in
         * host endian. Note that xirr is the value read from the
         * XIRR register, while h_xirr is the host endian version.
         */
        h_xirr = be32_to_cpu(xirr);
        local_paca->kvm_hstate.saved_xirr = h_xirr;
        xisr = h_xirr & 0xffffff;
        /*
         * Ensure that the store/load complete to guarantee all side
         * effects of loading from XIRR have completed
         */
        smp_mb();

        /* if nothing pending in the ICP */
        if (!xisr)
                return 0;

        /* We found something in the ICP...
         *
         * If it is an IPI, clear the MFRR and EOI it.
         */
        if (xisr == XICS_IPI) {
                rc = 0;
                if (kvmhv_on_pseries()) {
                        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                        plpar_hcall_raw(H_IPI, retbuf,
                                        hard_smp_processor_id(), 0xff);
                        plpar_hcall_raw(H_EOI, retbuf, h_xirr);
                } else if (xics_phys) {
                        __raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
                        __raw_rm_writel(xirr, xics_phys + XICS_XIRR);
                } else {
                        opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
                        rc = opal_int_eoi(h_xirr);
                }
                /* If rc > 0, there is another interrupt pending */
                *again = rc > 0;

                /*
                 * Need to ensure side effects of above stores
                 * complete before proceeding.
                 */
                smp_mb();

                /*
                 * We need to re-check host IPI now in case it got set in the
                 * meantime. If it's clear, we bounce the interrupt to the
                 * guest.
                 */
                host_ipi = local_paca->kvm_hstate.host_ipi;
                if (unlikely(host_ipi != 0)) {
                        /* We raced with the host,
                         * we need to resend that IPI, bummer
                         */
                        if (kvmhv_on_pseries()) {
                                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                                plpar_hcall_raw(H_IPI, retbuf,
                                                hard_smp_processor_id(),
                                                IPI_PRIORITY);
                        } else if (xics_phys)
                                __raw_rm_writeb(IPI_PRIORITY,
                                                xics_phys + XICS_MFRR);
                        else
                                opal_int_set_mfrr(hard_smp_processor_id(),
                                                  IPI_PRIORITY);
                        /* Let side effects complete */
                        smp_mb();
                        return 1;
                }

                /* OK, it's an IPI for us */
                local_paca->kvm_hstate.saved_xirr = 0;
                return -1;
        }

        return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
        return !(mfmsr() & MSR_DR);
}

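/*
 * The kvmppc_rm_h_* wrappers below can be entered either in real mode
 * (from the real-mode hcall path) or with relocation on. is_rm() tests
 * MSR[DR] to choose between the real-mode XIVE handlers and the
 * virtual-mode handlers that the XIVE module installed in the
 * __xive_vm_h_* pointers above.
 */
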
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_xirr(vcpu);
                if (unlikely(!__xive_vm_h_xirr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_xirr(vcpu);
        } else
                return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        vcpu->arch.regs.gpr[5] = get_tb();
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_xirr(vcpu);
                if (unlikely(!__xive_vm_h_xirr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_xirr(vcpu);
        } else
                return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_ipoll(vcpu, server);
                if (unlikely(!__xive_vm_h_ipoll))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_ipoll(vcpu, server);
        } else
                return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_ipi(vcpu, server, mfrr);
                if (unlikely(!__xive_vm_h_ipi))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_ipi(vcpu, server, mfrr);
        } else
                return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_cppr(vcpu, cppr);
                if (unlikely(!__xive_vm_h_cppr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_cppr(vcpu, cppr);
        } else
                return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_eoi(vcpu, xirr);
                if (unlikely(!__xive_vm_h_eoi))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_eoi(vcpu, xirr);
        } else
                return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

void kvmppc_bad_interrupt(struct pt_regs *regs)
{
        /*
         * 100 could happen at any time, 200 can happen due to invalid real
         * address access for example (or any time due to a hardware problem).
         */
        if (TRAP(regs) == 0x100) {
                get_paca()->in_nmi++;
                system_reset_exception(regs);
                get_paca()->in_nmi--;
        } else if (TRAP(regs) == 0x200) {
                machine_check_exception(regs);
        } else {
                die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
        }
        panic("Bad KVM trap");
}

/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))

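/*
 * ALL() replicates a phase bit into all four byte lanes of the 32-bit
 * allphases word, one lane per thread; e.g. ALL(PHASE_SET_LPCR) ==
 * 0x02020202, so the wait below completes only once every thread has set
 * PHASE_SET_LPCR in its own phase[] byte.
 */
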
static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
        int thr = local_paca->kvm_hstate.tid;

        sip->lpcr_sync.phase[thr] |= phase;
        phase = ALL(phase);
        while ((sip->lpcr_sync.allphases & phase) != phase) {
                HMT_low();
                barrier();
        }
        HMT_medium();
}

void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
        int num_sets;
        unsigned long rb, set;

        /* wait for every other thread to get to real mode */
        wait_for_sync(sip, PHASE_REALMODE);

        /* Set LPCR and LPIDR */
        mtspr(SPRN_LPCR, sip->lpcr_req);
        mtspr(SPRN_LPID, sip->lpidr_req);
        isync();

        /*
         * P10 will flush all the congruence classes with a single tlbiel
         */
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                num_sets = 1;
        else
                num_sets = POWER9_TLB_SETS_RADIX;

        /* Invalidate the TLB on thread 0 */
        if (local_paca->kvm_hstate.tid == 0) {
                sip->do_set = 0;
                asm volatile("ptesync" : : : "memory");
                for (set = 0; set < num_sets; ++set) {
                        rb = TLBIEL_INVAL_SET_LPID +
                                (set << TLBIEL_INVAL_SET_SHIFT);
                        asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
                                     "r" (rb), "r" (0));
                }
                asm volatile("ptesync" : : : "memory");
        }

        /* indicate that we have done so and wait for others */
        wait_for_sync(sip, PHASE_SET_LPCR);
        /* order read of sip->lpcr_sync.allphases vs. sip->do_set */
        smp_rmb();
}

/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
        /* we're out of the guest... */
        wait_for_sync(sip, PHASE_OUT_OF_GUEST);

        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_LPCR, sip->host_lpcr);
        isync();

        if (local_paca->kvm_hstate.tid == 0) {
                sip->do_restore = 0;
                smp_wmb();	/* order store of do_restore vs. phase */
        }

        wait_for_sync(sip, PHASE_RESET_LPCR);
        smp_mb();
        local_paca->kvm_hstate.kvm_split_mode = NULL;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
        vcpu->arch.ceded = 0;
        if (vcpu->arch.timer_running) {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                vcpu->arch.timer_running = 0;
        }
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
        /*
         * Check for illegal transactional state bit combination
         * and if we find it, force the TS field to a safe state.
         */
        if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
                msr &= ~MSR_TS_MASK;
        vcpu->arch.shregs.msr = msr;
        kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
        unsigned long msr, pc, new_msr, new_pc;

        msr = kvmppc_get_msr(vcpu);
        pc = kvmppc_get_pc(vcpu);
        new_msr = vcpu->arch.intr_msr;
        new_pc = vec;

        /* If transactional, change to suspend mode on IRQ delivery */
        if (MSR_TM_TRANSACTIONAL(msr))
                new_msr |= MSR_TS_S;
        else
                new_msr |= msr & MSR_TS_MASK;

        /*
         * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
         * applicable. AIL=2 is not supported.
         *
         * AIL does not apply to SRESET, MCE, or HMI (which is never
         * delivered to the guest), and does not apply if IR=0 or DR=0.
         */
        if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
            vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
            (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
            (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
                new_msr |= MSR_IR | MSR_DR;
                new_pc += 0xC000000000004000ULL;
        }

        kvmppc_set_srr0(vcpu, pc);
        kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
        kvmppc_set_pc(vcpu, new_pc);
        vcpu->arch.shregs.msr = new_msr;
}

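/*
 * Example of the AIL=3 adjustment above: a decrementer interrupt
 * (vec == 0x900) delivered with relocation on vectors to
 * 0xc000000000004900 rather than 0x900.
 */
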
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
        inject_interrupt(vcpu, vec, srr1_flags);
        kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
        int ext;
        unsigned long lpcr;

        /* Insert EXTERNAL bit into LPCR at the MER bit position */
        ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= ext << LPCR_MER_SH;
        mtspr(SPRN_LPCR, lpcr);
        isync();

        if (vcpu->arch.shregs.msr & MSR_EE) {
                if (ext) {
                        inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
                } else {
                        long int dec = mfspr(SPRN_DEC);
                        if (!(lpcr & LPCR_LD))
                                dec = (int) dec;
                        if (dec < 0)
                                inject_interrupt(vcpu,
                                        BOOK3S_INTERRUPT_DECREMENTER, 0);
                }
        }

        if (vcpu->arch.doorbell_request) {
                mtspr(SPRN_DPDES, 1);
                vcpu->arch.vcore->dpdes = 1;
                smp_wmb();
                vcpu->arch.doorbell_request = 0;
        }
}

static void flush_guest_tlb(struct kvm *kvm)
{
        unsigned long rb, set;

        rb = PPC_BIT(52);	/* IS = 2 */
        if (kvm_is_radix(kvm)) {
                /* R=1 PRS=1 RIC=2 */
                asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                             : : "r" (rb), "i" (1), "i" (1), "i" (2),
                               "r" (0) : "memory");
                for (set = 1; set < kvm->arch.tlb_sets; ++set) {
                        rb += PPC_BIT(51);	/* increment set number */
                        /* R=1 PRS=1 RIC=0 */
                        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                                     : : "r" (rb), "i" (1), "i" (1), "i" (0),
                                       "r" (0) : "memory");
                }
                asm volatile("ptesync": : :"memory");
                asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
        } else {
                for (set = 0; set < kvm->arch.tlb_sets; ++set) {
                        /* R=0 PRS=0 RIC=0 */
                        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                                     : : "r" (rb), "i" (0), "i" (0), "i" (0),
                                       "r" (0) : "memory");
                        rb += PPC_BIT(51);	/* increment set number */
                }
                asm volatile("ptesync": : :"memory");
                asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
        }
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
                                 struct kvm_nested_guest *nested)
{
        cpumask_t *need_tlb_flush;

        /*
         * On POWER9, individual threads can come in here, but the
         * TLB is shared between the 4 threads in a core, hence
         * invalidating on one thread invalidates for all.
         * Thus we make all 4 threads use the same bit.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                pcpu = cpu_first_thread_sibling(pcpu);

        if (nested)
                need_tlb_flush = &nested->need_tlb_flush;
        else
                need_tlb_flush = &kvm->arch.need_tlb_flush;

        if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
                flush_guest_tlb(kvm);

                /* Clear the bit after the TLB flush */
                cpumask_clear_cpu(pcpu, need_tlb_flush);
        }
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);