1 /* SPDX-License-Identifier: GPL-2.0-only */
4 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
6 * Derived from book3s_rmhandlers.S and other files, which are:
8 * Copyright SUSE Linux Products GmbH 2009
10 * Authors: Alexander Graf <agraf@suse.de>
13 #include <asm/ppc_asm.h>
14 #include <asm/code-patching-asm.h>
15 #include <asm/kvm_asm.h>
19 #include <asm/ptrace.h>
20 #include <asm/hvcall.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/exception-64s.h>
23 #include <asm/kvm_book3s_asm.h>
24 #include <asm/book3s/64/mmu-hash.h>
25 #include <asm/export.h>
28 #include <asm/xive-regs.h>
29 #include <asm/thread_info.h>
30 #include <asm/asm-compat.h>
31 #include <asm/feature-fixups.h>
32 #include <asm/cpuidle.h>
33 #include <asm/ultravisor-api.h>
35 /* Sign-extend HDEC if not on POWER9 */
36 #define EXTEND_HDEC(reg) \
37 BEGIN_FTR_SECTION; \
38 extsw reg, reg; \
39 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
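/*
 * Editor's sketch (not in the original file): on pre-POWER9 CPUs the
 * (H)DEC registers are 32 bits wide, so a value read with mfspr must be
 * sign-extended before it is used in 64-bit arithmetic. The extsw in the
 * macro above is exactly a C cast through a signed 32-bit type:
 *
 *	static inline long extend_hdec(unsigned long hdec)
 *	{
 *		return (long)(int)hdec;		// what "extsw reg, reg" does
 *	}
 *
 * On ISA v3.00 (POWER9) HDEC is wider, so the feature section removes
 * the extsw there.
 */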
41 /* Values in HSTATE_NAPPING(r13) */
42 #define NAPPING_CEDE 1
43 #define NAPPING_NOVCPU 2
44 #define NAPPING_UNSPLIT 3
46 /* Stack frame offsets for kvmppc_hv_entry */
47 #define SFS 208
48 #define STACK_SLOT_TRAP (SFS-4)
49 #define STACK_SLOT_SHORT_PATH (SFS-8)
50 #define STACK_SLOT_TID (SFS-16)
51 #define STACK_SLOT_PSSCR (SFS-24)
52 #define STACK_SLOT_PID (SFS-32)
53 #define STACK_SLOT_IAMR (SFS-40)
54 #define STACK_SLOT_CIABR (SFS-48)
55 #define STACK_SLOT_DAWR0 (SFS-56)
56 #define STACK_SLOT_DAWRX0 (SFS-64)
57 #define STACK_SLOT_HFSCR (SFS-72)
58 #define STACK_SLOT_AMR (SFS-80)
59 #define STACK_SLOT_UAMOR (SFS-88)
60 #define STACK_SLOT_DAWR1 (SFS-96)
61 #define STACK_SLOT_DAWRX1 (SFS-104)
62 /* the following is used by the P9 short path */
63 #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
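/*
 * Editor's sketch (not from the original file; assumes SFS = 208 as
 * filled in above): the NVGPR save area used by the P9 short path
 * deliberately overlaps the STACK_SLOT_TID..STACK_SLOT_DAWRX1 slots,
 * which only the full P7/P8 path uses; the two slots shared by both
 * paths (TRAP and SHORT_PATH) sit above the overlap.
 *
 *	#include <stdio.h>
 *
 *	#define SFS			208
 *	#define STACK_SLOT_DAWRX1	(SFS - 104)
 *	#define STACK_SLOT_NVGPRS	(SFS - 152)	// 18 gprs
 *
 *	int main(void)
 *	{
 *		// 18 GPRs * 8 bytes: [56, 200), ending right at
 *		// STACK_SLOT_SHORT_PATH (SFS - 8 = 200)
 *		printf("nvgprs [%d, %d), dawrx1 at %d\n",
 *		       STACK_SLOT_NVGPRS, STACK_SLOT_NVGPRS + 18 * 8,
 *		       STACK_SLOT_DAWRX1);
 *		return 0;
 *	}
 */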
66 * Call kvmppc_hv_entry in real mode.
67 * Must be called with interrupts hard-disabled.
71 * LR = return address to continue at after eventually re-enabling MMU
73 _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
75 std r0, PPC_LR_STKOFF(r1)
78 std r10, HSTATE_HOST_MSR(r13)
79 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
84 mtmsrd r0,1 /* clear RI in MSR */
90 ld r4, HSTATE_KVM_VCPU(r13)
93 /* Back from guest - restore host state and return to caller */
96 /* Restore host DABR and DABRX */
97 ld r5,HSTATE_DABR(r13)
101 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
104 ld r3,PACA_SPRG_VDSO(r13)
105 mtspr SPRN_SPRG_VDSO_WRITE,r3
107 /* Reload the host's PMU registers */
108 bl kvmhv_load_host_pmu
111 * Reload DEC. HDEC interrupts were disabled when
112 * we reloaded the host's LPCR value.
114 ld r3, HSTATE_DECEXP(r13)
119 /* hwthread_req may have been set by the cede or no-vcpu paths, so clear it */
121 stb r0, HSTATE_HWTHREAD_REQ(r13)
124 * For external interrupts we need to call the Linux
125 * handler to process the interrupt. We do that by jumping
126 * to absolute address 0x500 for external interrupts.
127 * The [h]rfid at the end of the handler will return to
128 * the book3s_hv_interrupts.S code. For other interrupts
129 * we do the rfid to get back to the book3s_hv_interrupts.S
130 * code there.
132 ld r8, 112+PPC_LR_STKOFF(r1)
134 ld r7, HSTATE_HOST_MSR(r13)
136 /* Return the trap number on this thread as the return value */
140 * If we came back from the guest via a relocation-on interrupt,
141 * we will be in virtual mode at this point, which makes it a
142 * little easier to get back to the caller.
145 andi. r0, r0, MSR_IR /* in real mode? */
148 /* RFI into the highmem handler */
152 mtmsrd r6, 1 /* Clear RI in MSR */
157 /* Virtual-mode return */
162 kvmppc_primary_no_guest:
163 /* We handle this much like a ceded vcpu */
164 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
165 /* HDEC may be larger than DEC for arch >= v3.00, but since the */
166 /* HDEC value came from DEC in the first place, it will fit */
170 * Make sure the primary has finished the MMU switch.
171 * We should never get here on a secondary thread, but
172 * check it for robustness' sake.
174 ld r5, HSTATE_KVM_VCORE(r13)
175 65: lbz r0, VCORE_IN_GUEST(r5)
182 /* set our bit in napping_threads */
183 ld r5, HSTATE_KVM_VCORE(r13)
184 lbz r7, HSTATE_PTID(r13)
187 addi r6, r5, VCORE_NAPPING_THREADS
192 /* order napping_threads update vs testing entry_exit_map */
195 lwz r7, VCORE_ENTRY_EXIT(r5)
197 bge kvm_novcpu_exit /* another thread already exiting */
198 li r3, NAPPING_NOVCPU
199 stb r3, HSTATE_NAPPING(r13)
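/*
 * Editor's sketch (hypothetical names; the atomic loop itself is elided
 * from this listing): the update of vcore->napping_threads addressed via
 * the addi above is an lwarx/stwcx. read-modify-write that ORs in this
 * thread's bit, roughly:
 *
 *	#include <stdatomic.h>
 *
 *	static void set_napping_bit(_Atomic unsigned int *napping_threads,
 *				    unsigned int ptid)
 *	{
 *		unsigned int old = atomic_load(napping_threads);
 *
 *		// retry on contention, as stwcx. does when the
 *		// reservation is lost
 *		while (!atomic_compare_exchange_weak(napping_threads, &old,
 *						     old | (1u << ptid)))
 *			;
 *	}
 *
 * The barrier noted above ("order napping_threads update vs testing
 * entry_exit_map") then keeps the bitmap store ahead of the re-read of
 * entry_exit, so a thread that starts exiting cannot miss us.
 */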
201 li r3, 0 /* Don't wake on privileged (OS) doorbell */
206 * Entered from kvm_start_guest if kvm_hstate.napping is set
212 ld r1, HSTATE_HOST_R1(r13)
213 ld r5, HSTATE_KVM_VCORE(r13)
215 stb r0, HSTATE_NAPPING(r13)
217 /* check the wake reason */
218 bl kvmppc_check_wake_reason
221 * Restore volatile registers since we could have called
222 * a C routine in kvmppc_check_wake_reason.
225 ld r5, HSTATE_KVM_VCORE(r13)
227 /* see if any other thread is already exiting */
228 lwz r0, VCORE_ENTRY_EXIT(r5)
232 /* clear our bit in napping_threads */
233 lbz r7, HSTATE_PTID(r13)
236 addi r6, r5, VCORE_NAPPING_THREADS
242 /* See if the wake reason means we need to exit */
246 /* See if our timeslice has expired (HDEC is negative) */
249 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
253 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
254 ld r4, HSTATE_KVM_VCPU(r13)
256 beq kvmppc_primary_no_guest
258 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
259 addi r3, r4, VCPU_TB_RMENTRY
260 bl kvmhv_start_timing
265 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
266 ld r4, HSTATE_KVM_VCPU(r13)
269 addi r3, r4, VCPU_TB_RMEXIT
270 bl kvmhv_accumulate_time
273 stw r12, STACK_SLOT_TRAP(r1)
274 bl kvmhv_commence_exit
276 b kvmhv_switch_to_host
279 * We come in here when woken from the Linux offline idle code.
281 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
283 _GLOBAL(idle_kvm_start_guest)
284 ld r4,PACAEMERGSP(r13)
290 subi r1,r4,STACK_FRAME_OVERHEAD
294 * Could avoid this and pass it through in r3. For now,
295 * code expects it to be in SRR1.
300 stb r0,PACA_FTRACE_ENABLED(r13)
302 li r0,KVM_HWTHREAD_IN_KVM
303 stb r0,HSTATE_HWTHREAD_STATE(r13)
305 /* kvm cede / napping does not come through here */
306 lbz r0,HSTATE_NAPPING(r13)
313 stb r0, HSTATE_NAPPING(r13)
318 * We weren't napping due to cede, so this must be a secondary
319 * thread being woken up to run a guest, or being woken up due
320 * to a stray IPI. (Or due to some machine check or hypervisor
321 * maintenance interrupt while the core is in KVM.)
324 /* Check the wake reason in SRR1 to see why we got here */
325 bl kvmppc_check_wake_reason
327 * kvmppc_check_wake_reason could invoke a C routine, but we
328 * have no volatile registers to restore when we return.
334 /* get vcore pointer, NULL if we have nothing to run */
335 ld r5,HSTATE_KVM_VCORE(r13)
337 /* if we have no vcore to run, go back to sleep */
340 kvm_secondary_got_guest:
342 /* Set HSTATE_DSCR(r13) to something sensible */
343 ld r6, PACA_DSCR_DEFAULT(r13)
344 std r6, HSTATE_DSCR(r13)
346 /* On thread 0 of a subcore, set HDEC to max */
347 lbz r4, HSTATE_PTID(r13)
350 LOAD_REG_ADDR(r6, decrementer_max)
354 /* and set per-LPAR registers, if doing dynamic micro-threading */
355 ld r6, HSTATE_SPLIT_MODE(r13)
358 ld r0, KVM_SPLIT_RPR(r6)
360 ld r0, KVM_SPLIT_PMMAR(r6)
362 ld r0, KVM_SPLIT_LDBAR(r6)
365 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
367 /* Order load of vcpu after load of vcore */
369 ld r4, HSTATE_KVM_VCPU(r13)
372 /* Back from the guest, go back to nap */
373 /* Clear our vcpu and vcore pointers so we don't come back in early */
375 std r0, HSTATE_KVM_VCPU(r13)
377 * Once we clear HSTATE_KVM_VCORE(r13), the code in
378 * kvmppc_run_core() is going to assume that all our vcpu
379 * state is visible in memory. This lwsync makes sure
380 * that that is true.
383 std r0, HSTATE_KVM_VCORE(r13)
386 * All secondaries exiting guest will fall through this path.
387 * Before proceeding, just check for HMI interrupt and
388 * invoke opal hmi handler. By now we are sure that the
389 * primary thread on this core/subcore has already done the partition
390 * switch and TB resync, so we are good to call the opal hmi handler.
392 cmpwi r12, BOOK3S_INTERRUPT_HMI
395 li r3,0 /* NULL argument */
396 bl hmi_exception_realmode
398 * At this point we have finished executing in the guest.
399 * We need to wait for hwthread_req to become zero, since
400 * we may not turn on the MMU while hwthread_req is non-zero.
401 * While waiting we also need to check if we get given a vcpu to run.
404 lbz r3, HSTATE_HWTHREAD_REQ(r13)
408 li r0, KVM_HWTHREAD_IN_KERNEL
409 stb r0, HSTATE_HWTHREAD_STATE(r13)
410 /* need to recheck hwthread_req after a barrier, to avoid race */
412 lbz r3, HSTATE_HWTHREAD_REQ(r13)
417 * Jump to idle_return_gpr_loss, which returns to the
418 * idle_kvm_start_guest caller.
422 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
424 /* set up r3 for return */
427 addi r1, r1, STACK_FRAME_OVERHEAD
438 ld r5, HSTATE_KVM_VCORE(r13)
441 ld r3, HSTATE_SPLIT_MODE(r13)
444 lbz r0, KVM_SPLIT_DO_NAP(r3)
450 b kvm_secondary_got_guest
453 ld r5, HSTATE_KVM_VCORE(r13)
457 b kvm_secondary_got_guest
458 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
460 54: li r0, KVM_HWTHREAD_IN_KVM
461 stb r0, HSTATE_HWTHREAD_STATE(r13)
465 * Here the primary thread is trying to return the core to
466 * whole-core mode, so we need to nap.
470 * When secondaries are napping in kvm_unsplit_nap() with
471 * hwthread_req = 1, an HMI is ignored even though the subcores
472 * have already exited the guest. The HMI then keeps waking the
473 * secondaries from nap in a loop, and they always go back to nap
474 * since no vcore is assigned to them. This makes it impossible for
475 * the primary thread to get hold of the secondary threads,
476 * resulting in a soft lockup in the KVM path.
478 * Let us check if HMI is pending and handle it before we go to nap.
480 cmpwi r12, BOOK3S_INTERRUPT_HMI
482 li r3, 0 /* NULL argument */
483 bl hmi_exception_realmode
486 * Ensure that secondary doesn't nap when it has
487 * its vcore pointer set.
489 sync /* matches smp_mb() before setting split_info.do_nap */
490 ld r0, HSTATE_KVM_VCORE(r13)
493 /* clear any pending message */
495 lis r6, (PPC_DBELL_SERVER << (63-36))@h
497 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
498 /* Set kvm_split_mode.napped[tid] = 1 */
499 ld r3, HSTATE_SPLIT_MODE(r13)
501 lhz r4, PACAPACAINDEX(r13)
502 clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
503 addi r4, r4, KVM_SPLIT_NAPPED
505 /* Check the do_nap flag again after setting napped[] */
507 lbz r0, KVM_SPLIT_DO_NAP(r3)
510 li r3, NAPPING_UNSPLIT
511 stb r3, HSTATE_NAPPING(r13)
512 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
514 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
521 /******************************************************************************
525 *****************************************************************************/
527 .global kvmppc_hv_entry
532 * R4 = vcpu pointer (or NULL)
537 * all other volatile GPRS = free
538 * Does not preserve non-volatile GPRs or CR fields
541 std r0, PPC_LR_STKOFF(r1)
544 /* Save R1 in the PACA */
545 std r1, HSTATE_HOST_R1(r13)
547 li r6, KVM_GUEST_MODE_HOST_HV
548 stb r6, HSTATE_IN_GUEST(r13)
550 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
551 /* Store initial timestamp */
554 addi r3, r4, VCPU_TB_RMENTRY
555 bl kvmhv_start_timing
559 ld r5, HSTATE_KVM_VCORE(r13)
560 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
563 * POWER7/POWER8 host -> guest partition switch code.
564 * We don't have to lock against concurrent tlbies,
565 * but we do have to coordinate across hardware threads.
567 /* Set bit in entry map iff exit map is zero. */
569 lbz r6, HSTATE_PTID(r13)
571 addi r8, r5, VCORE_ENTRY_EXIT
573 cmpwi r3, 0x100 /* any threads starting to exit? */
574 bge secondary_too_late /* if so we're too late to the party */
579 /* Primary thread switches to guest partition. */
586 li r0,LPID_RSVD /* switch to reserved LPID */
589 mtspr SPRN_SDR1,r6 /* switch to partition page table */
590 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
594 /* See if we need to flush the TLB. */
595 mr r3, r9 /* kvm pointer */
596 lhz r4, PACAPACAINDEX(r13) /* physical cpu number */
597 li r5, 0 /* nested vcpu pointer */
598 bl kvmppc_check_need_tlb_flush
600 ld r5, HSTATE_KVM_VCORE(r13)
602 /* Add timebase offset onto timebase */
603 22: ld r8,VCORE_TB_OFFSET(r5)
606 std r8, VCORE_TB_OFFSET_APPL(r5)
607 mftb r6 /* current host timebase */
609 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
610 mftb r7 /* check if lower 24 bits overflowed */
615 addis r8,r8,0x100 /* if so, increment upper 40 bits */
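/*
 * Editor's sketch (mftb()/mtspr_tbu40() are assumed helper names): the
 * TBU40 register writes only the upper 40 bits of the timebase while the
 * low 24 bits keep running. If the low 24 bits wrap between reading the
 * old TB and writing the new upper bits, the carry into bit 24 is lost,
 * which is what the re-check and the addis ...,0x100 above repair:
 *
 *	static void apply_tb_offset(unsigned long offset)
 *	{
 *		unsigned long old = mftb();
 *		unsigned long new = old + offset;
 *
 *		mtspr_tbu40(new);			// update bits 24..63
 *		if ((mftb() & 0xffffff) < (old & 0xffffff))
 *			mtspr_tbu40(new + 0x1000000);	// redo the lost carry
 *	}
 */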
618 /* Load guest PCR value to select appropriate compat mode */
619 37: ld r7, VCORE_PCR(r5)
620 LOAD_REG_IMMEDIATE(r6, PCR_MASK)
628 /* DPDES and VTB are shared between threads */
629 ld r8, VCORE_DPDES(r5)
633 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
635 /* Mark the subcore state as inside guest */
636 bl kvmppc_subcore_enter_guest
638 ld r5, HSTATE_KVM_VCORE(r13)
639 ld r4, HSTATE_KVM_VCPU(r13)
641 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
643 /* Do we have a guest vcpu to run? */
645 beq kvmppc_primary_no_guest
647 /* Increment yield count if they have a VPA */
651 li r6, LPPACA_YIELDCOUNT
656 stb r6, VCPU_VPA_DIRTY(r4)
659 /* Save purr/spurr */
662 std r5,HSTATE_PURR(r13)
663 std r6,HSTATE_SPURR(r13)
669 /* Save host values of some registers */
674 std r5, STACK_SLOT_TID(r1)
675 std r6, STACK_SLOT_PSSCR(r1)
676 std r7, STACK_SLOT_PID(r1)
678 std r5, STACK_SLOT_HFSCR(r1)
679 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
683 mfspr r7, SPRN_DAWRX0
685 std r5, STACK_SLOT_CIABR(r1)
686 std r6, STACK_SLOT_DAWR0(r1)
687 std r7, STACK_SLOT_DAWRX0(r1)
688 std r8, STACK_SLOT_IAMR(r1)
689 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
692 mfspr r7, SPRN_DAWRX1
693 std r6, STACK_SLOT_DAWR1(r1)
694 std r7, STACK_SLOT_DAWRX1(r1)
695 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
698 std r5, STACK_SLOT_AMR(r1)
700 std r6, STACK_SLOT_UAMOR(r1)
703 /* Set partition DABR */
704 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
705 lwz r5,VCPU_DABRX(r4)
710 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
712 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
714 * Branch around the call if both CPU_FTR_TM and
715 * CPU_FTR_P9_TM_HV_ASSIST are off.
719 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
721 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
725 li r5, 0 /* don't preserve non-vol regs */
726 bl kvmppc_restore_tm_hv
728 ld r4, HSTATE_KVM_VCPU(r13)
732 /* Load guest PMU registers; r4 = vcpu pointer here */
734 bl kvmhv_load_guest_pmu
736 /* Load up FP, VMX and VSX registers */
737 ld r4, HSTATE_KVM_VCPU(r13)
740 ld r14, VCPU_GPR(R14)(r4)
741 ld r15, VCPU_GPR(R15)(r4)
742 ld r16, VCPU_GPR(R16)(r4)
743 ld r17, VCPU_GPR(R17)(r4)
744 ld r18, VCPU_GPR(R18)(r4)
745 ld r19, VCPU_GPR(R19)(r4)
746 ld r20, VCPU_GPR(R20)(r4)
747 ld r21, VCPU_GPR(R21)(r4)
748 ld r22, VCPU_GPR(R22)(r4)
749 ld r23, VCPU_GPR(R23)(r4)
750 ld r24, VCPU_GPR(R24)(r4)
751 ld r25, VCPU_GPR(R25)(r4)
752 ld r26, VCPU_GPR(R26)(r4)
753 ld r27, VCPU_GPR(R27)(r4)
754 ld r28, VCPU_GPR(R28)(r4)
755 ld r29, VCPU_GPR(R29)(r4)
756 ld r30, VCPU_GPR(R30)(r4)
757 ld r31, VCPU_GPR(R31)(r4)
759 /* Switch DSCR to guest value */
764 /* Skip next section on POWER7 */
766 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
767 /* Load up POWER8-specific registers */
769 lwz r6, VCPU_PSPB(r4)
775 * Handle broken DAWR case by not writing it. This means we
776 * can still store the DAWR register for migration.
778 LOAD_REG_ADDR(r5, dawr_force_enable)
782 ld r5, VCPU_DAWR0(r4)
783 ld r6, VCPU_DAWRX0(r4)
785 mtspr SPRN_DAWRX0, r6
787 ld r5, VCPU_DAWR1(r4)
788 ld r6, VCPU_DAWRX1(r4)
790 mtspr SPRN_DAWRX1, r6
791 END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
793 ld r7, VCPU_CIABR(r4)
798 ld r8, VCPU_EBBHR(r4)
801 ld r5, VCPU_EBBRR(r4)
802 ld r6, VCPU_BESCR(r4)
803 lwz r7, VCPU_GUEST_PID(r4)
810 /* POWER8-only registers */
811 ld r5, VCPU_TCSCR(r4)
813 ld r7, VCPU_CSIGR(r4)
821 /* POWER9-only registers */
823 ld r6, VCPU_PSSCR(r4)
824 lbz r8, HSTATE_FAKE_SUSPEND(r13)
825 oris r6, r6, PSSCR_EC@h /* This makes the stop instruction trap to HV */
826 rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
827 ld r7, VCPU_HFSCR(r4)
831 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
834 ld r5, VCPU_SPRG0(r4)
835 ld r6, VCPU_SPRG1(r4)
836 ld r7, VCPU_SPRG2(r4)
837 ld r8, VCPU_SPRG3(r4)
843 /* Load up DAR and DSISR */
845 lwz r6, VCPU_DSISR(r4)
849 /* Restore AMR and UAMOR, set AMOR to all 1s */
857 /* Restore state of CTRL run bit; assume 1 on entry */
865 /* Secondary threads wait for primary to have done partition switch */
866 ld r5, HSTATE_KVM_VCORE(r13)
867 lbz r6, HSTATE_PTID(r13)
870 lbz r0, VCORE_IN_GUEST(r5)
874 20: lwz r3, VCORE_ENTRY_EXIT(r5)
877 lbz r0, VCORE_IN_GUEST(r5)
888 * Set the decrementer to the guest decrementer.
890 ld r8,VCPU_DEC_EXPIRES(r4)
891 /* r8 is a host timebase value here, convert to guest TB */
892 ld r5,HSTATE_KVM_VCORE(r13)
893 ld r6,VCORE_TB_OFFSET_APPL(r5)
899 /* Check if HDEC expires soon */
902 cmpdi r3, 512 /* 1 microsecond */
906 lbz r0, KVM_RADIX(r6)
910 /* For hash guest, clear out and reload the SLB */
911 BEGIN_MMU_FTR_SECTION
912 /* Radix host won't have populated the SLB, so no need to clear */
917 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
919 /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
920 lwz r5,VCPU_SLB_MAX(r4)
925 1: ld r8,VCPU_SLB_E(r6)
928 addi r6,r6,VCPU_SLB_SIZE
932 #ifdef CONFIG_KVM_XICS
933 /* We are entering the guest on that thread, push VCPU to XIVE */
934 ld r11, VCPU_XIVE_SAVED_STATE(r4)
936 lwz r8, VCPU_XIVE_CAM_WORD(r4)
939 li r7, TM_QW1_OS + TM_WORD2
941 andi. r0, r0, MSR_DR /* in real mode? */
943 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
950 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
957 stb r9, VCPU_XIVE_PUSHED(r4)
961 * We clear the irq_pending flag. There is a small chance of a
962 * race vs. the escalation interrupt happening on another
963 * processor setting it again, but the only consequence is to
964 * cause a spurious wakeup on the next H_CEDE, which is not an
965 * issue.
968 stb r0, VCPU_IRQ_PENDING(r4)
971 * In single escalation mode, if the escalation interrupt is
972 * on, we mask it.
974 lbz r0, VCPU_XIVE_ESC_ON(r4)
977 li r9, XIVE_ESB_SET_PQ_01
978 beq 4f /* in real mode? */
979 ld r10, VCPU_XIVE_ESC_VADDR(r4)
982 4: ld r10, VCPU_XIVE_ESC_RADDR(r4)
986 /* We have a possible subtle race here: The escalation interrupt might
987 * have fired and be on its way to the host queue while we mask it,
988 * and if we unmask it early enough (re-cede right away), there is
989 * a theoretical possibility that it fires again, thus landing in the
990 * target queue more than once which is a big no-no.
992 * Fortunately, solving this is rather easy. If the above load setting
993 * PQ to 01 returns a previous value where P is set, then we know the
994 * escalation interrupt is somewhere on its way to the host. In that
995 * case we simply don't clear the xive_esc_on flag below. It will be
996 * eventually cleared by the handler for the escalation interrupt.
998 * Then, when doing a cede, we check that flag again before re-enabling
999 * the escalation interrupt, and if set, we abort the cede.
1001 andi. r0, r0, XIVE_ESB_VAL_P
1004 /* Now P is 0, we can clear the flag */
1006 stb r0, VCPU_XIVE_ESC_ON(r4)
1009 #endif /* CONFIG_KVM_XICS */
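/*
 * Editor's sketch of the masking above (pointer names are hypothetical;
 * XIVE_ESB_VAL_P is the P bit returned by an ESB load). A load from the
 * "set PQ to 01" ESB offset masks the escalation source and returns the
 * previous PQ state:
 *
 *	static void mask_escalation(volatile unsigned long *esb_set_pq_01,
 *				    unsigned char *xive_esc_on)
 *	{
 *		unsigned long prev = *esb_set_pq_01;	// MMIO load, PQ := 01
 *
 *		// If P was already set, the escalation is on its way to the
 *		// host queue: leave xive_esc_on set so a later cede won't
 *		// re-enable it; the escalation handler clears the flag.
 *		if (!(prev & XIVE_ESB_VAL_P))
 *			*xive_esc_on = 0;
 *	}
 */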
1012 stw r0, STACK_SLOT_SHORT_PATH(r1)
1014 deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
1015 /* Check if we can deliver an external or decrementer interrupt now */
1016 ld r0, VCPU_PENDING_EXC(r4)
1018 /* On POWER9, also check for emulated doorbell interrupt */
1019 lbz r3, VCPU_DBELL_REQ(r4)
1021 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1025 bl kvmppc_guest_entry_inject_int
1026 ld r4, HSTATE_KVM_VCPU(r13)
1028 ld r6, VCPU_SRR0(r4)
1029 ld r7, VCPU_SRR1(r4)
1035 ld r11, VCPU_MSR(r4)
1036 /* r11 = vcpu->arch.msr & ~MSR_HV */
1037 rldicl r11, r11, 63 - MSR_HV_LG, 1
1038 rotldi r11, r11, 1 + MSR_HV_LG
1039 ori r11, r11, MSR_ME
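/*
 * Editor's sketch: the rldicl/rotldi pair above is a branch-free way to
 * clear MSR_HV (the two rotate amounts sum to 64, so every other bit ends
 * up back where it started). Together with the ori it computes, in C
 * (bit positions from the architecture; MSR_HV_LG = 60):
 *
 *	#define MSR_HV	(1ul << 60)
 *	#define MSR_ME	(1ul << 12)	// machine check enable
 *
 *	static unsigned long guest_msr(unsigned long msr)
 *	{
 *		return (msr & ~MSR_HV) | MSR_ME;   // never HV, always ME
 *	}
 */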
1049 * R10: value for HSRR0
1050 * R11: value for HSRR1
1055 stb r0,VCPU_CEDED(r4) /* cancel cede */
1056 mtspr SPRN_HSRR0,r10
1057 mtspr SPRN_HSRR1,r11
1059 /* Activate guest mode, so faults get handled by KVM */
1060 li r9, KVM_GUEST_MODE_GUEST_HV
1061 stb r9, HSTATE_IN_GUEST(r13)
1063 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1064 /* Accumulate timing */
1065 addi r3, r4, VCPU_TB_GUEST
1066 bl kvmhv_accumulate_time
1072 ld r5, VCPU_CFAR(r4)
1074 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1077 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1082 ld r1, VCPU_GPR(R1)(r4)
1083 ld r5, VCPU_GPR(R5)(r4)
1084 ld r8, VCPU_GPR(R8)(r4)
1085 ld r9, VCPU_GPR(R9)(r4)
1086 ld r10, VCPU_GPR(R10)(r4)
1087 ld r11, VCPU_GPR(R11)(r4)
1088 ld r12, VCPU_GPR(R12)(r4)
1089 ld r13, VCPU_GPR(R13)(r4)
1093 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1095 /* Move canary into DSISR to check for later */
1098 mtspr SPRN_HDSISR, r0
1099 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1102 lbz r7, KVM_SECURE_GUEST(r6)
1104 ld r6, VCPU_GPR(R6)(r4)
1105 ld r7, VCPU_GPR(R7)(r4)
1111 ld r0, VCPU_GPR(R0)(r4)
1112 ld r2, VCPU_GPR(R2)(r4)
1113 ld r3, VCPU_GPR(R3)(r4)
1114 ld r4, VCPU_GPR(R4)(r4)
1118 * Use UV_RETURN ultracall to return control back to the Ultravisor after
1119 * processing a hypercall or interrupt that was forwarded (a.k.a. reflected)
1120 * to the Hypervisor.
1122 * All registers have already been loaded, except:
1124 * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
1131 ld r0, VCPU_GPR(R3)(r4)
1134 ori r3, r3, UV_RETURN
1135 ld r4, VCPU_GPR(R4)(r4)
1139 * Enter the guest on a P9 or later system where we have exactly
1140 * one vcpu per vcore and we don't need to go to real mode
1141 * (which implies that host and guest are both using radix MMU mode).
1143 * Most SPRs and all the VSRs have been loaded already.
1145 _GLOBAL(__kvmhv_vcpu_entry_p9)
1146 EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
1148 std r0, PPC_LR_STKOFF(r1)
1152 stw r0, STACK_SLOT_SHORT_PATH(r1)
1154 std r3, HSTATE_KVM_VCPU(r13)
1158 std r1, HSTATE_HOST_R1(r13)
1162 std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
1168 ld reg, __VCPU_GPR(reg)(r3)
1173 std r10, HSTATE_HOST_MSR(r13)
1176 b fast_guest_entry_c
1177 guest_exit_short_path:
1179 * Malicious or buggy radix guests may have inserted SLB entries
1180 * (only 0..3 because radix always runs with UPRT=1), so these must
1181 * be cleared here to avoid side-channels. slbmte is used rather
1182 * than slbia, as it won't clear cached translations.
1193 li r0, KVM_GUEST_MODE_NONE
1194 stb r0, HSTATE_IN_GUEST(r13)
1198 std reg, __VCPU_GPR(reg)(r9)
1204 ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
1211 mr r3, r12 /* trap number */
1214 ld r0, PPC_LR_STKOFF(r1)
1217 /* If we are in real mode, do a rfid to get back to the caller */
1219 andi. r5, r4, MSR_IR
1221 rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
1223 ld r10, HSTATE_HOST_MSR(r13)
1224 rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
1225 mtspr SPRN_SRR1, r10
1231 stw r12, STACK_SLOT_TRAP(r1)
1234 stw r12, VCPU_TRAP(r4)
1235 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1236 addi r3, r4, VCPU_TB_RMEXIT
1237 bl kvmhv_accumulate_time
1239 11: b kvmhv_switch_to_host
1246 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
1247 12: stw r12, VCPU_TRAP(r4)
1249 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1250 addi r3, r4, VCPU_TB_RMEXIT
1251 bl kvmhv_accumulate_time
1255 /******************************************************************************
1259 *****************************************************************************/
1262 * We come here from the first-level interrupt handlers.
1264 .globl kvmppc_interrupt_hv
1265 kvmppc_interrupt_hv:
1267 * Register contents:
1268 * R12 = (guest CR << 32) | interrupt vector
1270 * guest R12 saved in shadow VCPU SCRATCH0
1271 * guest R13 saved in SPRN_SCRATCH0
1273 std r9, HSTATE_SCRATCH2(r13)
1274 lbz r9, HSTATE_IN_GUEST(r13)
1275 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1276 beq kvmppc_bad_host_intr
1277 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1278 cmpwi r9, KVM_GUEST_MODE_GUEST
1279 ld r9, HSTATE_SCRATCH2(r13)
1280 beq kvmppc_interrupt_pr
1282 /* We're now back in the host but in guest MMU context */
1283 li r9, KVM_GUEST_MODE_HOST_HV
1284 stb r9, HSTATE_IN_GUEST(r13)
1286 ld r9, HSTATE_KVM_VCPU(r13)
1288 /* Save registers */
1290 std r0, VCPU_GPR(R0)(r9)
1291 std r1, VCPU_GPR(R1)(r9)
1292 std r2, VCPU_GPR(R2)(r9)
1293 std r3, VCPU_GPR(R3)(r9)
1294 std r4, VCPU_GPR(R4)(r9)
1295 std r5, VCPU_GPR(R5)(r9)
1296 std r6, VCPU_GPR(R6)(r9)
1297 std r7, VCPU_GPR(R7)(r9)
1298 std r8, VCPU_GPR(R8)(r9)
1299 ld r0, HSTATE_SCRATCH2(r13)
1300 std r0, VCPU_GPR(R9)(r9)
1301 std r10, VCPU_GPR(R10)(r9)
1302 std r11, VCPU_GPR(R11)(r9)
1303 ld r3, HSTATE_SCRATCH0(r13)
1304 std r3, VCPU_GPR(R12)(r9)
1305 /* CR is in the high half of r12 */
1309 ld r3, HSTATE_CFAR(r13)
1310 std r3, VCPU_CFAR(r9)
1311 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1313 ld r4, HSTATE_PPR(r13)
1314 std r4, VCPU_PPR(r9)
1315 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1317 /* Restore R1/R2 so we can handle faults */
1318 ld r1, HSTATE_HOST_R1(r13)
1321 mfspr r10, SPRN_SRR0
1322 mfspr r11, SPRN_SRR1
1323 std r10, VCPU_SRR0(r9)
1324 std r11, VCPU_SRR1(r9)
1325 /* trap is in the low half of r12, clear CR from the high half */
1327 andi. r0, r12, 2 /* need to read HSRR0/1? */
1329 mfspr r10, SPRN_HSRR0
1330 mfspr r11, SPRN_HSRR1
1332 1: std r10, VCPU_PC(r9)
1333 std r11, VCPU_MSR(r9)
1337 std r3, VCPU_GPR(R13)(r9)
1340 stw r12,VCPU_TRAP(r9)
1343 * Now that we have saved away SRR0/1 and HSRR0/1,
1344 * interrupts are recoverable in principle, so set MSR_RI.
1345 * This becomes important for relocation-on interrupts from
1346 * the guest, which we can get in radix mode on POWER9.
1351 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1352 addi r3, r9, VCPU_TB_RMINTR
1354 bl kvmhv_accumulate_time
1355 ld r5, VCPU_GPR(R5)(r9)
1356 ld r6, VCPU_GPR(R6)(r9)
1357 ld r7, VCPU_GPR(R7)(r9)
1358 ld r8, VCPU_GPR(R8)(r9)
1361 /* Save HEIR (HV emulation assist reg) in emul_inst
1362 if this is an HEI (HV emulation interrupt, e40) */
1363 li r3,KVM_INST_FETCH_FAILED
1364 stw r3,VCPU_LAST_INST(r9)
1365 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1368 11: stw r3,VCPU_HEIR(r9)
1370 /* these are volatile across C function calls */
1373 std r3, VCPU_CTR(r9)
1374 std r4, VCPU_XER(r9)
1376 /* Save more register state */
1379 std r3, VCPU_DAR(r9)
1380 stw r4, VCPU_DSISR(r9)
1382 /* If this is a page table miss then see if it's theirs or ours */
1383 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1385 std r3, VCPU_FAULT_DAR(r9)
1386 stw r4, VCPU_FAULT_DSISR(r9)
1387 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1390 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1391 /* For softpatch interrupt, go off and do TM instruction emulation */
1392 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
1396 /* See if this is a leftover HDEC interrupt */
1397 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1403 bge fast_guest_return
1405 /* See if this is an hcall we can handle in real mode */
1406 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1407 beq hcall_try_real_mode
1409 /* Hypervisor doorbell - exit only if host IPI flag set */
1410 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1415 /* always exit if we're running a nested guest */
1416 ld r0, VCPU_NESTED(r9)
1419 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1420 lbz r0, HSTATE_HOST_IPI(r13)
1422 beq maybe_reenter_guest
1425 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1426 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1428 mfspr r3, SPRN_HFSCR
1429 std r3, VCPU_HFSCR(r9)
1432 /* External interrupt? */
1433 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1434 beq kvmppc_guest_external
1435 /* See if it is a machine check */
1436 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1437 beq machine_check_realmode
1438 /* Or a hypervisor maintenance interrupt */
1439 cmpwi r12, BOOK3S_INTERRUPT_HMI
1442 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1444 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1445 addi r3, r9, VCPU_TB_RMEXIT
1447 bl kvmhv_accumulate_time
1449 #ifdef CONFIG_KVM_XICS
1450 /* We are exiting, pull the VP from the XIVE */
1451 lbz r0, VCPU_XIVE_PUSHED(r9)
1454 li r7, TM_SPC_PULL_OS_CTX
1457 andi. r0, r0, MSR_DR /* in real mode? */
1459 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1462 /* First load to pull the context, we ignore the value */
1465 /* Second load to recover the context state (Words 0 and 1) */
1468 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1471 /* First load to pull the context, we ignore the value */
1474 /* Second load to recover the context state (Words 0 and 1) */
1476 3: std r11, VCPU_XIVE_SAVED_STATE(r9)
1477 /* Fixup some of the state for the next load */
1480 stb r10, VCPU_XIVE_PUSHED(r9)
1481 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1482 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1485 #endif /* CONFIG_KVM_XICS */
1488 * Possibly flush the link stack here, before we do a blr in
1489 * guest_exit_short_path.
1492 patch_site 1b patch__call_kvm_flush_link_stack
1494 /* If we came in through the P9 short path, go back out to C now */
1495 lwz r0, STACK_SLOT_SHORT_PATH(r1)
1497 bne guest_exit_short_path
1499 /* For hash guest, read the guest SLB and save it away */
1501 lbz r0, KVM_RADIX(r5)
1504 bne 0f /* for radix, save 0 entries */
1505 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1510 andis. r0,r8,SLB_ESID_V@h
1512 add r8,r8,r6 /* put index in */
1514 std r8,VCPU_SLB_E(r7)
1515 std r3,VCPU_SLB_V(r7)
1516 addi r7,r7,VCPU_SLB_SIZE
1520 /* Finally clear out the SLB */
1525 stw r5,VCPU_SLB_MAX(r9)
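/*
 * Editor's sketch of the save loop above (slbmfee()/slbmfev() stand for
 * the instructions of the same name; the field names are assumptions).
 * Only valid entries are kept, and the entry index is folded into the
 * otherwise-zero low bits of the ESID dword so the entry can be
 * re-inserted with slbmte later:
 *
 *	n = 0;
 *	for (i = 0; i < vcpu->arch.slb_nr; i++) {
 *		unsigned long e = slbmfee(i);
 *
 *		if (e & SLB_ESID_V) {
 *			vcpu->arch.slb[n].orige = e | i;	// put index in
 *			vcpu->arch.slb[n].origv = slbmfev(i);
 *			n++;
 *		}
 *	}
 *	vcpu->arch.slb_max = n;
 */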
1527 /* load host SLB entries */
1528 BEGIN_MMU_FTR_SECTION
1530 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1531 ld r8,PACA_SLBSHADOWPTR(r13)
1533 .rept SLB_NUM_BOLTED
1534 li r3, SLBSHADOW_SAVEAREA
1538 andis. r7,r5,SLB_ESID_V@h
1546 * Sanitise radix guest SLB, see guest_exit_short_path comment.
1547 * We clear vcpu->arch.slb_max to match earlier behaviour.
1550 stw r0,VCPU_SLB_MAX(r9)
1560 stw r12, STACK_SLOT_TRAP(r1)
1563 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1564 ld r3, HSTATE_KVM_VCORE(r13)
1567 /* On P9, if the guest has large decr enabled, don't sign extend */
1569 ld r4, VCORE_LPCR(r3)
1570 andis. r4, r4, LPCR_LD@h
1572 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1575 /* r5 is a guest timebase value here, convert to host TB */
1576 ld r4,VCORE_TB_OFFSET_APPL(r3)
1578 std r5,VCPU_DEC_EXPIRES(r9)
1580 /* Increment exit count, poke other threads to exit */
1582 bl kvmhv_commence_exit
1584 ld r9, HSTATE_KVM_VCPU(r13)
1586 /* Stop others sending VCPU interrupts to this physical CPU */
1588 stw r0, VCPU_CPU(r9)
1589 stw r0, VCPU_THREAD_CPU(r9)
1591 /* Save guest CTRL register, set runlatch to 1 */
1593 stw r6,VCPU_CTRL(r9)
1600 * Save the guest PURR/SPURR
1605 ld r8,VCPU_SPURR(r9)
1606 std r5,VCPU_PURR(r9)
1607 std r6,VCPU_SPURR(r9)
1612 * Restore host PURR/SPURR and add guest times
1613 * so that the time in the guest gets accounted.
1615 ld r3,HSTATE_PURR(r13)
1616 ld r4,HSTATE_SPURR(r13)
1624 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1625 /* Save POWER8-specific registers */
1629 std r5, VCPU_IAMR(r9)
1630 stw r6, VCPU_PSPB(r9)
1631 std r7, VCPU_FSCR(r9)
1635 std r7, VCPU_TAR(r9)
1636 mfspr r8, SPRN_EBBHR
1637 std r8, VCPU_EBBHR(r9)
1638 mfspr r5, SPRN_EBBRR
1639 mfspr r6, SPRN_BESCR
1642 std r5, VCPU_EBBRR(r9)
1643 std r6, VCPU_BESCR(r9)
1644 stw r7, VCPU_GUEST_PID(r9)
1645 std r8, VCPU_WORT(r9)
1647 mfspr r5, SPRN_TCSCR
1649 mfspr r7, SPRN_CSIGR
1651 std r5, VCPU_TCSCR(r9)
1652 std r6, VCPU_ACOP(r9)
1653 std r7, VCPU_CSIGR(r9)
1654 std r8, VCPU_TACR(r9)
1657 mfspr r6, SPRN_PSSCR
1658 std r5, VCPU_TID(r9)
1659 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1661 std r6, VCPU_PSSCR(r9)
1662 /* Restore host HFSCR value */
1663 ld r7, STACK_SLOT_HFSCR(r1)
1664 mtspr SPRN_HFSCR, r7
1665 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1667 * Restore various registers to 0, where non-zero values
1668 * set by the guest could disrupt the host.
1674 mtspr SPRN_TCSCR, r0
1675 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1678 mtspr SPRN_MMCRS, r0
1679 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1681 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
1682 ld r8, STACK_SLOT_IAMR(r1)
1685 8: /* Power7 jumps back in here */
1689 std r6,VCPU_UAMOR(r9)
1690 ld r5,STACK_SLOT_AMR(r1)
1691 ld r6,STACK_SLOT_UAMOR(r1)
1693 mtspr SPRN_UAMOR, r6
1695 /* Switch DSCR back to host value */
1697 ld r7, HSTATE_DSCR(r13)
1698 std r8, VCPU_DSCR(r9)
1701 /* Save non-volatile GPRs */
1702 std r14, VCPU_GPR(R14)(r9)
1703 std r15, VCPU_GPR(R15)(r9)
1704 std r16, VCPU_GPR(R16)(r9)
1705 std r17, VCPU_GPR(R17)(r9)
1706 std r18, VCPU_GPR(R18)(r9)
1707 std r19, VCPU_GPR(R19)(r9)
1708 std r20, VCPU_GPR(R20)(r9)
1709 std r21, VCPU_GPR(R21)(r9)
1710 std r22, VCPU_GPR(R22)(r9)
1711 std r23, VCPU_GPR(R23)(r9)
1712 std r24, VCPU_GPR(R24)(r9)
1713 std r25, VCPU_GPR(R25)(r9)
1714 std r26, VCPU_GPR(R26)(r9)
1715 std r27, VCPU_GPR(R27)(r9)
1716 std r28, VCPU_GPR(R28)(r9)
1717 std r29, VCPU_GPR(R29)(r9)
1718 std r30, VCPU_GPR(R30)(r9)
1719 std r31, VCPU_GPR(R31)(r9)
1722 mfspr r3, SPRN_SPRG0
1723 mfspr r4, SPRN_SPRG1
1724 mfspr r5, SPRN_SPRG2
1725 mfspr r6, SPRN_SPRG3
1726 std r3, VCPU_SPRG0(r9)
1727 std r4, VCPU_SPRG1(r9)
1728 std r5, VCPU_SPRG2(r9)
1729 std r6, VCPU_SPRG3(r9)
1735 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1737 * Branch around the call if both CPU_FTR_TM and
1738 * CPU_FTR_P9_TM_HV_ASSIST are off.
1742 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
1744 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
1748 li r5, 0 /* don't preserve non-vol regs */
1749 bl kvmppc_save_tm_hv
1751 ld r9, HSTATE_KVM_VCPU(r13)
1755 /* Increment yield count if they have a VPA */
1756 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1759 li r4, LPPACA_YIELDCOUNT
1764 stb r3, VCPU_VPA_DIRTY(r9)
1766 /* Save PMU registers if requested */
1767 /* r8 and cr0.eq are live here */
1770 beq 21f /* if no VPA, save PMU stuff anyway */
1771 lbz r4, LPPACA_PMCINUSE(r8)
1772 21: bl kvmhv_save_guest_pmu
1773 ld r9, HSTATE_KVM_VCPU(r13)
1775 /* Restore host values of some registers */
1777 ld r5, STACK_SLOT_CIABR(r1)
1778 ld r6, STACK_SLOT_DAWR0(r1)
1779 ld r7, STACK_SLOT_DAWRX0(r1)
1780 mtspr SPRN_CIABR, r5
1782 * If the DAWR doesn't work, it's ok to write these here as
1783 * this value should always be zero
1785 mtspr SPRN_DAWR0, r6
1786 mtspr SPRN_DAWRX0, r7
1787 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1789 ld r6, STACK_SLOT_DAWR1(r1)
1790 ld r7, STACK_SLOT_DAWRX1(r1)
1791 mtspr SPRN_DAWR1, r6
1792 mtspr SPRN_DAWRX1, r7
1793 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
1795 ld r5, STACK_SLOT_TID(r1)
1796 ld r6, STACK_SLOT_PSSCR(r1)
1797 ld r7, STACK_SLOT_PID(r1)
1799 mtspr SPRN_PSSCR, r6
1801 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1803 #ifdef CONFIG_PPC_RADIX_MMU
1805 * Are we running hash or radix?
1808 lbz r0, KVM_RADIX(r5)
1813 * Radix: do eieio; tlbsync; ptesync sequence in case we
1814 * interrupted the guest between a tlbie and a ptesync.
1821 /* Radix: Handle the case where the guest used an illegal PID */
1822 LOAD_REG_ADDR(r4, mmu_base_pid)
1823 lwz r3, VCPU_GUEST_PID(r9)
1829 * Illegal PID, the HW might have prefetched and cached in the TLB
1830 * some translations for the LPID 0 / guest PID combination which
1831 * Linux doesn't know about, so we need to flush that PID out of
1832 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1833 * the right context.
1839 /* Then do a congruence class local flush */
1841 lwz r0,KVM_TLB_SETS(r6)
1843 li r7,0x400 /* IS field = 0b01 */
1845 sldi r0,r3,32 /* RS has PID */
1846 1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1850 END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
1853 #endif /* CONFIG_PPC_RADIX_MMU */
1856 * cp_abort is required if the processor supports local copy-paste
1857 * to clear the copy buffer that was under control of the guest.
1861 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
1864 * POWER7/POWER8 guest -> host partition switch code.
1865 * We don't have to lock against tlbies but we do
1866 * have to coordinate the hardware threads.
1867 * Here STACK_SLOT_TRAP(r1) contains the trap number.
1869 kvmhv_switch_to_host:
1870 /* Secondary threads wait for primary to do partition switch */
1871 ld r5,HSTATE_KVM_VCORE(r13)
1872 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1873 lbz r3,HSTATE_PTID(r13)
1877 13: lbz r3,VCORE_IN_GUEST(r5)
1883 /* Primary thread waits for all the secondaries to exit guest */
1884 15: lwz r3,VCORE_ENTRY_EXIT(r5)
1885 rlwinm r0,r3,32-8,0xff
1891 /* Did we actually switch to the guest at all? */
1892 lbz r6, VCORE_IN_GUEST(r5)
1896 /* Primary thread switches back to host partition */
1897 lwz r7,KVM_HOST_LPID(r4)
1899 ld r6,KVM_HOST_SDR1(r4)
1900 li r8,LPID_RSVD /* switch to reserved LPID */
1903 mtspr SPRN_SDR1,r6 /* switch to host page table */
1904 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1909 /* DPDES and VTB are shared between threads */
1910 mfspr r7, SPRN_DPDES
1912 std r7, VCORE_DPDES(r5)
1913 std r8, VCORE_VTB(r5)
1914 /* clear DPDES so we don't get guest doorbells in the host */
1916 mtspr SPRN_DPDES, r8
1917 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1919 /* Subtract timebase offset from timebase */
1920 ld r8, VCORE_TB_OFFSET_APPL(r5)
1924 std r0, VCORE_TB_OFFSET_APPL(r5)
1925 mftb r6 /* current guest timebase */
1927 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1928 mftb r7 /* check if lower 24 bits overflowed */
1933 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1938 * If this is an HMI, we called kvmppc_realmode_hmi_handler
1939 * above, which may or may not have already called
1940 * kvmppc_subcore_exit_guest. Fortunately, all that
1941 * kvmppc_subcore_exit_guest does is clear a flag, so calling
1942 * it again here is benign even if kvmppc_realmode_hmi_handler
1943 * has already called it.
1945 bl kvmppc_subcore_exit_guest
1947 30: ld r5,HSTATE_KVM_VCORE(r13)
1948 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1951 ld r0, VCORE_PCR(r5)
1952 LOAD_REG_IMMEDIATE(r6, PCR_MASK)
1957 /* Signal secondary CPUs to continue */
1959 stb r0,VCORE_IN_GUEST(r5)
1960 19: lis r8,0x7fff /* MAX_INT@h */
1963 16: ld r8,KVM_HOST_LPCR(r4)
1967 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1968 /* Finish timing, if we have a vcpu */
1969 ld r4, HSTATE_KVM_VCPU(r13)
1973 bl kvmhv_accumulate_time
1976 /* Unset guest mode */
1977 li r0, KVM_GUEST_MODE_NONE
1978 stb r0, HSTATE_IN_GUEST(r13)
1980 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
1981 ld r0, SFS+PPC_LR_STKOFF(r1)
1987 .global kvm_flush_link_stack
1988 kvm_flush_link_stack:
1989 /* Save LR into r0 */
1992 /* Flush the link stack. On Power8 it's up to 32 entries in size. */
1997 /* And on Power9 it's up to 64. */
2002 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2008 kvmppc_guest_external:
2009 /* External interrupt, first check for host_ipi. If this is
2010 * set, we know the host wants us out so let's do it now
2015 * Restore the active volatile registers after returning from
2016 * a C function.
2018 ld r9, HSTATE_KVM_VCPU(r13)
2019 li r12, BOOK3S_INTERRUPT_EXTERNAL
2022 * kvmppc_read_intr return codes:
2024 * Exit to host (r3 > 0)
2025 * 1 An interrupt is pending that needs to be handled by the host
2026 * Exit guest and return to host by branching to guest_exit_cont
2028 * 2 Passthrough that needs completion in the host
2029 * Exit guest and return to host by branching to guest_exit_cont
2030 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
2031 * to indicate to the host to complete handling the interrupt
2033 * Before returning to guest, we check if any CPU is heading out
2034 * to the host and if so, we head out also. If no CPUs are heading
2035 * out, check return values <= 0.
2037 * Return to guest (r3 <= 0)
2038 * 0 No external interrupt is pending
2039 * -1 A guest wakeup IPI (which has now been cleared)
2040 * In either case, we return to guest to deliver any pending
2043 * -2 A PCI passthrough external interrupt was handled
2044 * (interrupt was delivered directly to guest)
2045 * Return to guest to deliver any pending guest interrupts.
2051 /* Return code = 2 */
2052 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
2053 stw r12, VCPU_TRAP(r9)
2056 1: /* Return code <= 1 */
2060 /* Return code <= 0 */
2061 maybe_reenter_guest:
2062 ld r5, HSTATE_KVM_VCORE(r13)
2063 lwz r0, VCORE_ENTRY_EXIT(r5)
2066 blt deliver_guest_interrupt
2069 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2071 * Softpatch interrupt for transactional memory emulation cases
2072 * on POWER9 DD2.2. This is early in the guest exit path - we
2073 * haven't saved registers or done a treclaim yet.
2076 /* Save instruction image in HEIR */
2078 stw r3, VCPU_HEIR(r9)
2081 * The cases we want to handle here are those where the guest
2082 * is in real suspend mode and is trying to transition to
2083 * transactional mode.
2085 lbz r0, HSTATE_FAKE_SUSPEND(r13)
2086 cmpwi r0, 0 /* keep exiting guest if in fake suspend */
2088 rldicl r3, r11, 64 - MSR_TS_S_LG, 62
2089 cmpwi r3, 1 /* or if not in suspend state */
2092 /* Call C code to do the emulation */
2094 bl kvmhv_p9_tm_emulation_early
2096 ld r9, HSTATE_KVM_VCPU(r13)
2097 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
2099 beq guest_exit_cont /* continue exiting if not handled */
2101 ld r11, VCPU_MSR(r9)
2102 b fast_interrupt_c_return /* go back to guest if handled */
2103 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2106 * Check whether an HDSI is an HPTE not found fault or something else.
2107 * If it is an HPTE not found fault that is due to the guest accessing
2108 * a page that it has mapped but which we have paged out, then
2109 * we continue on with the guest exit path. In all other cases,
2110 * reflect the HDSI to the guest as a DSI.
2114 lbz r0, KVM_RADIX(r3)
2116 mfspr r6, SPRN_HDSISR
2118 /* Look for DSISR canary. If we find it, retry instruction */
2121 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2123 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
2124 /* HPTE not found fault or protection fault? */
2125 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
2126 beq 1f /* if not, send it to the guest */
2127 andi. r0, r11, MSR_DR /* data relocation enabled? */
2130 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2132 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2134 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
2135 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2136 bne 7f /* if no SLB entry found */
2137 4: std r4, VCPU_FAULT_DAR(r9)
2138 stw r6, VCPU_FAULT_DSISR(r9)
2140 /* Search the hash table. */
2141 mr r3, r9 /* vcpu pointer */
2142 li r7, 1 /* data fault */
2143 bl kvmppc_hpte_hv_fault
2144 ld r9, HSTATE_KVM_VCPU(r13)
2146 ld r11, VCPU_MSR(r9)
2147 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2148 cmpdi r3, 0 /* retry the instruction */
2150 cmpdi r3, -1 /* handle in kernel mode */
2152 cmpdi r3, -2 /* MMIO emulation; need instr word */
2155 /* Synthesize a DSI (or DSegI) for the guest */
2156 ld r4, VCPU_FAULT_DAR(r9)
2158 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
2159 mtspr SPRN_DSISR, r6
2160 7: mtspr SPRN_DAR, r4
2161 mtspr SPRN_SRR0, r10
2162 mtspr SPRN_SRR1, r11
2164 bl kvmppc_msr_interrupt
2165 fast_interrupt_c_return:
2166 6: ld r7, VCPU_CTR(r9)
2173 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2174 ld r5, KVM_VRMA_SLB_V(r5)
2177 /* If this is for emulated MMIO, load the instruction word */
2178 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2180 /* Set guest mode to 'jump over instruction' so if lwz faults
2181 * we'll just continue at the next IP. */
2182 li r0, KVM_GUEST_MODE_SKIP
2183 stb r0, HSTATE_IN_GUEST(r13)
2185 /* Do the access with MSR:DR enabled */
2187 ori r4, r3, MSR_DR /* Enable paging for data */
2192 /* Store the result */
2193 stw r8, VCPU_LAST_INST(r9)
2195 /* Unset guest mode. */
2196 li r0, KVM_GUEST_MODE_HOST_HV
2197 stb r0, HSTATE_IN_GUEST(r13)
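/*
 * Editor's sketch (conceptual; structure-member names are hypothetical):
 * the KVM_GUEST_MODE_SKIP window above lets the interrupt handler turn a
 * faulting lwz into "resume at the next instruction", so the fetch
 * degrades gracefully instead of crashing the host:
 *
 *	static u32 fetch_guest_inst(const u32 *guest_pc)
 *	{
 *		u32 inst = KVM_INST_FETCH_FAILED;  // kept if the load faults
 *
 *		hstate.in_guest = KVM_GUEST_MODE_SKIP;
 *		// done with MSR_DR temporarily enabled (the ori/MSR_DR
 *		// sequence above), so guest translation applies
 *		inst = *guest_pc;
 *		hstate.in_guest = KVM_GUEST_MODE_HOST_HV;
 *		return inst;
 *	}
 */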
2201 std r4, VCPU_FAULT_DAR(r9)
2202 stw r6, VCPU_FAULT_DSISR(r9)
2205 std r5, VCPU_FAULT_GPA(r9)
2209 * Similarly for an HISI, reflect it to the guest as an ISI unless
2210 * it is an HPTE not found fault for a page that we have paged out.
2214 lbz r0, KVM_RADIX(r3)
2216 bne .Lradix_hisi /* for radix, just save ASDR */
2217 andis. r0, r11, SRR1_ISI_NOPT@h
2219 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2222 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2224 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2226 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
2227 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2228 bne 7f /* if no SLB entry found */
2230 /* Search the hash table. */
2231 mr r3, r9 /* vcpu pointer */
2234 li r7, 0 /* instruction fault */
2235 bl kvmppc_hpte_hv_fault
2236 ld r9, HSTATE_KVM_VCPU(r13)
2238 ld r11, VCPU_MSR(r9)
2239 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2240 cmpdi r3, 0 /* retry the instruction */
2241 beq fast_interrupt_c_return
2242 cmpdi r3, -1 /* handle in kernel mode */
2245 /* Synthesize an ISI (or ISegI) for the guest */
2247 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE
2248 7: mtspr SPRN_SRR0, r10
2249 mtspr SPRN_SRR1, r11
2251 bl kvmppc_msr_interrupt
2252 b fast_interrupt_c_return
2254 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2255 ld r5, KVM_VRMA_SLB_V(r6)
2259 * Try to handle an hcall in real mode.
2260 * Returns to the guest if we handle it, or continues on up to
2261 * the kernel if we can't (i.e. if we don't have a handler for
2262 * it, or if the handler returns H_TOO_HARD).
2264 * r5 - r8 contain hcall args,
2265 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
2267 hcall_try_real_mode:
2268 ld r3,VCPU_GPR(R3)(r9)
2270 /* sc 1 from userspace - reflect to guest syscall */
2271 bne sc_1_fast_return
2272 /* sc 1 from nested guest - give it to L1 to handle */
2273 ld r0, VCPU_NESTED(r9)
2277 cmpldi r3,hcall_real_table_end - hcall_real_table
2279 /* See if this hcall is enabled for in-kernel handling */
2281 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2282 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2284 ld r0, KVM_ENABLED_HCALLS(r4)
2285 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
2289 /* Get pointer to handler, if any, and call it */
2290 LOAD_REG_ADDR(r4, hcall_real_table)
2296 mr r3,r9 /* get vcpu pointer */
2297 ld r4,VCPU_GPR(R4)(r9)
2300 beq hcall_real_fallback
2301 ld r4,HSTATE_KVM_VCPU(r13)
2302 std r3,VCPU_GPR(R3)(r4)
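/*
 * Editor's sketch of the dispatch above (hypothetical helper; hcall
 * numbers step by 4 and hcall_real_table holds one 32-bit offset per
 * possible number). The srdi/sldi/rlwinm arithmetic is the usual bitmap
 * lookup, word (nr/4)/64 and bit (nr/4)%64:
 *
 *	typedef long (*hcall_fn)(struct kvm_vcpu *vcpu);
 *
 *	static long try_real_mode_hcall(struct kvm_vcpu *vcpu, unsigned long nr)
 *	{
 *		unsigned long idx = nr / 4;
 *		long off;
 *
 *		if (nr >= hcall_real_table_bytes)	// past table end
 *			return H_TOO_HARD;
 *		if (!(vcpu->kvm->arch.enabled_hcalls[idx / 64] &
 *		      (1ul << (idx % 64))))
 *			return H_TOO_HARD;	// not enabled for in-kernel handling
 *		off = hcall_real_table[idx];
 *		if (!off)
 *			return H_TOO_HARD;	// no real-mode handler
 *		return ((hcall_fn)((char *)hcall_real_table + off))(vcpu);
 *	}
 *
 * H_TOO_HARD here stands for the fallback to the virtual-mode path
 * (hcall_real_fallback below).
 */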
2310 li r10, BOOK3S_INTERRUPT_SYSCALL
2311 bl kvmppc_msr_interrupt
2315 /* We've attempted a real mode hcall, but it's punted it back
2316 * to userspace. We need to restore some clobbered volatiles
2317 * before resuming the pass-it-to-qemu path */
2318 hcall_real_fallback:
2319 li r12,BOOK3S_INTERRUPT_SYSCALL
2320 ld r9, HSTATE_KVM_VCPU(r13)
2324 .globl hcall_real_table
2326 .long 0 /* 0 - unused */
2327 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2328 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2329 .long DOTSYM(kvmppc_h_read) - hcall_real_table
2330 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2331 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
2332 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2333 #ifdef CONFIG_SPAPR_TCE_IOMMU
2334 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
2335 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
2340 .long 0 /* 0x24 - H_SET_SPRG0 */
2341 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
2342 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
2356 #ifdef CONFIG_KVM_XICS
2357 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2358 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2359 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
2360 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
2361 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
2363 .long 0 /* 0x64 - H_EOI */
2364 .long 0 /* 0x68 - H_CPPR */
2365 .long 0 /* 0x6c - H_IPI */
2366 .long 0 /* 0x70 - H_IPOLL */
2367 .long 0 /* 0x74 - H_XIRR */
2395 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
2396 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
2412 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
2416 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
2417 #ifdef CONFIG_SPAPR_TCE_IOMMU
2418 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
2419 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
2535 #ifdef CONFIG_KVM_XICS
2536 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2538 .long 0 /* 0x2fc - H_XIRR_X*/
2540 .long DOTSYM(kvmppc_h_random) - hcall_real_table
2541 .globl hcall_real_table_end
2542 hcall_real_table_end:
2544 _GLOBAL(kvmppc_h_set_xdabr)
2545 EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
2546 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2548 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2551 6: li r3, H_PARAMETER
2554 _GLOBAL(kvmppc_h_set_dabr)
2555 EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
2556 li r5, DABRX_USER | DABRX_KERNEL
2560 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2561 std r4,VCPU_DABR(r3)
2562 stw r5, VCPU_DABRX(r3)
2563 mtspr SPRN_DABRX, r5
2564 /* Work around P7 bug where DABR can get corrupted on mtspr */
2565 1: mtspr SPRN_DABR,r4
2574 LOAD_REG_ADDR(r11, dawr_force_enable)
2581 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2582 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
2583 rlwimi r5, r4, 2, DAWRX_WT
2585 std r4, VCPU_DAWR0(r3)
2586 std r5, VCPU_DAWRX0(r3)
2588 * If we came in through the real mode hcall handler then it is necessary
2589 * to write the registers since the return path won't. Otherwise it is
2590 * sufficient to store them in the vcpu struct as they will be loaded
2591 * next time the vcpu is run.
2594 andi. r6, r6, MSR_DR /* in real mode? */
2596 mtspr SPRN_DAWR0, r4
2597 mtspr SPRN_DAWRX0, r5
2601 _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
2603 std r11,VCPU_MSR(r3)
2605 stb r0,VCPU_CEDED(r3)
2606 sync /* order setting ceded vs. testing prodded */
2607 lbz r5,VCPU_PRODDED(r3)
2609 bne kvm_cede_prodded
2610 li r12,0 /* set trap to 0 to say hcall is handled */
2611 stw r12,VCPU_TRAP(r3)
2613 std r0,VCPU_GPR(R3)(r3)
2616 * Set our bit in the bitmask of napping threads unless all the
2617 * other threads are already napping, in which case we send this
2618 * up to the host.
2620 ld r5,HSTATE_KVM_VCORE(r13)
2621 lbz r6,HSTATE_PTID(r13)
2622 lwz r8,VCORE_ENTRY_EXIT(r5)
2626 addi r6,r5,VCORE_NAPPING_THREADS
2633 /* order napping_threads update vs testing entry_exit_map */
2636 stb r0,HSTATE_NAPPING(r13)
2637 lwz r7,VCORE_ENTRY_EXIT(r5)
2639 bge 33f /* another thread already exiting */
2642 * Although not specifically required by the architecture, POWER7
2643 * preserves the following registers in nap mode, even if an SMT mode
2644 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2645 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2647 /* Save non-volatile GPRs */
2648 std r14, VCPU_GPR(R14)(r3)
2649 std r15, VCPU_GPR(R15)(r3)
2650 std r16, VCPU_GPR(R16)(r3)
2651 std r17, VCPU_GPR(R17)(r3)
2652 std r18, VCPU_GPR(R18)(r3)
2653 std r19, VCPU_GPR(R19)(r3)
2654 std r20, VCPU_GPR(R20)(r3)
2655 std r21, VCPU_GPR(R21)(r3)
2656 std r22, VCPU_GPR(R22)(r3)
2657 std r23, VCPU_GPR(R23)(r3)
2658 std r24, VCPU_GPR(R24)(r3)
2659 std r25, VCPU_GPR(R25)(r3)
2660 std r26, VCPU_GPR(R26)(r3)
2661 std r27, VCPU_GPR(R27)(r3)
2662 std r28, VCPU_GPR(R28)(r3)
2663 std r29, VCPU_GPR(R29)(r3)
2664 std r30, VCPU_GPR(R30)(r3)
2665 std r31, VCPU_GPR(R31)(r3)
2670 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2672 * Branch around the call if both CPU_FTR_TM and
2673 * CPU_FTR_P9_TM_HV_ASSIST are off.
2677 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
2679 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
2681 ld r3, HSTATE_KVM_VCPU(r13)
2683 li r5, 0 /* don't preserve non-vol regs */
2684 bl kvmppc_save_tm_hv
2690 * Set DEC to the smaller of DEC and HDEC, so that we wake
2691 * no later than the end of our timeslice (HDEC interrupts
2692 * don't wake us from nap).
2698 /* On P9 check whether the guest has large decrementer mode enabled */
2699 ld r6, HSTATE_KVM_VCORE(r13)
2700 ld r6, VCORE_LPCR(r6)
2701 andis. r6, r6, LPCR_LD@h
2703 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2710 /* save expiry time of guest decrementer */
2712 ld r4, HSTATE_KVM_VCPU(r13)
2713 ld r5, HSTATE_KVM_VCORE(r13)
2714 ld r6, VCORE_TB_OFFSET_APPL(r5)
2715 subf r3, r6, r3 /* convert to host TB value */
2716 std r3, VCPU_DEC_EXPIRES(r4)
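/*
 * Editor's sketch: dec_expires is kept in host timebase units, while DEC
 * ticks in guest time (host TB plus vcore->tb_offset while the offset is
 * applied). The subf above and the add on the reload path are the two
 * directions of the same conversion:
 *
 *	static inline unsigned long guest_tb_to_host(unsigned long tb,
 *						     unsigned long tb_offset)
 *	{
 *		return tb - tb_offset;		// subf r3, r6, r3 (save side)
 *	}
 *
 *	static inline unsigned long host_tb_to_guest(unsigned long tb,
 *						     unsigned long tb_offset)
 *	{
 *		return tb + tb_offset;		// add r3, r3, r6 (reload side)
 *	}
 */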
2718 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2719 ld r4, HSTATE_KVM_VCPU(r13)
2720 addi r3, r4, VCPU_TB_CEDE
2721 bl kvmhv_accumulate_time
2724 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2726 /* Go back to host stack */
2727 ld r1, HSTATE_HOST_R1(r13)
2730 * Take a nap until a decrementer, external or doorbell interrupt
2731 * occurs, with PECE1 and PECE0 set in LPCR.
2732 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
2733 * Also clear the runlatch bit before napping.
2736 mfspr r0, SPRN_CTRLF
2738 mtspr SPRN_CTRLT, r0
2741 stb r0,HSTATE_HWTHREAD_REQ(r13)
2743 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
2745 ori r5, r5, LPCR_PECEDH
2746 rlwimi r5, r3, 0, LPCR_PECEDP
2747 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2749 kvm_nap_sequence: /* desired LPCR value in r5 */
2752 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2753 * enable state loss = 1 (allow SMT mode switch)
2754 * requested level = 0 (just stop dispatching)
2756 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2757 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2758 li r4, LPCR_PECE_HVEE@higher
2762 li r3, PNV_THREAD_NAP
2763 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
2768 bl isa300_idle_stop_mayloss
2770 bl isa206_idle_insn_mayloss
2771 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
2773 mfspr r0, SPRN_CTRLF
2775 mtspr SPRN_CTRLT, r0
2780 stb r0, PACA_FTRACE_ENABLED(r13)
2782 li r0, KVM_HWTHREAD_IN_KVM
2783 stb r0, HSTATE_HWTHREAD_STATE(r13)
2785 lbz r0, HSTATE_NAPPING(r13)
2786 cmpwi r0, NAPPING_CEDE
2788 cmpwi r0, NAPPING_NOVCPU
2789 beq kvm_novcpu_wakeup
2791 cmpwi r0, NAPPING_UNSPLIT
2792 beq kvm_unsplit_wakeup
2793 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
2794 twi 31,0,0 /* Nap state must not be zero */
2802 /* Woken by external or decrementer interrupt */
2804 /* get vcpu pointer */
2805 ld r4, HSTATE_KVM_VCPU(r13)
2807 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2808 addi r3, r4, VCPU_TB_RMINTR
2809 bl kvmhv_accumulate_time
2812 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2814 * Branch around the call if both CPU_FTR_TM and
2815 * CPU_FTR_P9_TM_HV_ASSIST are off.
2819 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
2821 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
2825 li r5, 0 /* don't preserve non-vol regs */
2826 bl kvmppc_restore_tm_hv
2828 ld r4, HSTATE_KVM_VCPU(r13)
2832 /* load up FP state */
2835 /* Restore guest decrementer */
2836 ld r3, VCPU_DEC_EXPIRES(r4)
2837 ld r5, HSTATE_KVM_VCORE(r13)
2838 ld r6, VCORE_TB_OFFSET_APPL(r5)
2839 add r3, r3, r6 /* convert host TB to guest TB value */
2845 ld r14, VCPU_GPR(R14)(r4)
2846 ld r15, VCPU_GPR(R15)(r4)
2847 ld r16, VCPU_GPR(R16)(r4)
2848 ld r17, VCPU_GPR(R17)(r4)
2849 ld r18, VCPU_GPR(R18)(r4)
2850 ld r19, VCPU_GPR(R19)(r4)
2851 ld r20, VCPU_GPR(R20)(r4)
2852 ld r21, VCPU_GPR(R21)(r4)
2853 ld r22, VCPU_GPR(R22)(r4)
2854 ld r23, VCPU_GPR(R23)(r4)
2855 ld r24, VCPU_GPR(R24)(r4)
2856 ld r25, VCPU_GPR(R25)(r4)
2857 ld r26, VCPU_GPR(R26)(r4)
2858 ld r27, VCPU_GPR(R27)(r4)
2859 ld r28, VCPU_GPR(R28)(r4)
2860 ld r29, VCPU_GPR(R29)(r4)
2861 ld r30, VCPU_GPR(R30)(r4)
2862 ld r31, VCPU_GPR(R31)(r4)
2864 /* Check the wake reason in SRR1 to see why we got here */
2865 bl kvmppc_check_wake_reason
2868 * Restore volatile registers since we could have called a
2869 * C routine in kvmppc_check_wake_reason
2871 * r3 tells us whether we need to return to host or not
2872 * WARNING: it gets checked further down;
2873 * do not modify r3 until this check is done.
2875 ld r4, HSTATE_KVM_VCPU(r13)
2877 /* clear our bit in vcore->napping_threads */
2878 34: ld r5,HSTATE_KVM_VCORE(r13)
2879 lbz r7,HSTATE_PTID(r13)
2882 addi r6,r5,VCORE_NAPPING_THREADS
2888 stb r0,HSTATE_NAPPING(r13)
2890 /* See if the wake reason saved in r3 means we need to exit */
2891 stw r12, VCPU_TRAP(r4)
2895 b maybe_reenter_guest
2897 /* the case where we cede but have already been prodded */
2900 stb r0,VCPU_PRODDED(r3)
2901 sync /* order testing prodded vs. clearing ceded */
2902 stb r0,VCPU_CEDED(r3)
2906 /* we've ceded but we want to give control to the host */
2908 ld r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* are we using XIVE with single escalation? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	cmpdi	r10, 0
	beq	3f
	li	r6, XIVE_ESB_SET_PQ_00
	/*
	 * If we still have a pending escalation, abort the cede,
	 * and we must set PQ to 10 rather than 00 so that we don't
	 * potentially end up with two entries for the escalation
	 * interrupt in the XIVE interrupt queue.  In that case
	 * we also don't want to set xive_esc_on to 1 here in
	 * case we race with xive_esc_irq().
	 */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	cmpwi	r5, 0
	beq	4f
	li	r0, 0
	stb	r0, VCPU_CEDED(r9)
	/*
	 * The escalation interrupts are special as we don't EOI them.
	 * There is no need to use the load-after-store ordering offset
	 * to set PQ to 10 as we won't use StoreEOI.
	 */
	li	r6, XIVE_ESB_SET_PQ_10
	b	5f
4:	li	r0, 1
	stb	r0, VCPU_XIVE_ESC_ON(r9)
	/* make sure store to xive_esc_on is seen before xive_esc_irq runs */
	sync
5:	/* Enable XIVE escalation */
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	1f
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	ldx	r0, r10, r6
	b	2f
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
	ldcix	r0, r10, r6
2:	sync
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont
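	/*
	 * Illustrative sketch (C, not built) of the cede/escalation logic
	 * above; esb_load() is a stand-in for the (real- or virtual-mode)
	 * load from the escalation ESB page that sets the PQ bits.
	 *
	 *	void cede_enable_escalation(struct kvm_vcpu_sketch *v)
	 *	{
	 *		unsigned long pq = XIVE_ESB_SET_PQ_00;
	 *
	 *		if (v->xive_esc_on) {
	 *			// Escalation still pending: abort the cede and
	 *			// use PQ=10 so the escalation interrupt cannot
	 *			// be queued twice; leave xive_esc_on alone to
	 *			// avoid racing with xive_esc_irq().
	 *			v->ceded = 0;
	 *			pq = XIVE_ESB_SET_PQ_10;
	 *		} else {
	 *			v->xive_esc_on = 1;
	 *			sync();	// order the store before xive_esc_irq()
	 *		}
	 *		esb_load(v, pq);	// re-arm the escalation
	 *	}
	 */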
	/* Try to do machine check recovery in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	/* all machine checks go to virtual mode for further handling */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	b	guest_exit_cont
/*
 * Call C code to handle a HMI in real mode.
 * Only the primary thread does the call, secondary threads are handled
 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
 * r9 points to the vcpu on entry
 */
hmi_realmode:
	lbz	r0, HSTATE_PTID(r13)
	cmpwi	r0, 0
	bne	guest_exit_cont
	bl	kvmppc_realmode_hmi_handler
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HMI
	b	guest_exit_cont
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance ? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr
	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr
	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt.  Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr
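	/*
	 * Illustrative sketch (C, not built) of the wake-reason contract
	 * documented above; everything except kvmppc_read_intr is a
	 * stand-in name:
	 *
	 *	int check_wake_reason(uint64_t srr1, uint32_t *trap)
	 *	{
	 *		*trap = 0;
	 *		switch (srr1_wake_field(srr1)) {
	 *		case 6:			// decrementer
	 *		case 5:			// privileged doorbell (P8)
	 *			return 0;	// nothing to do
	 *		case 3:			// hypervisor doorbell
	 *			*trap = BOOK3S_INTERRUPT_H_DOORBELL;
	 *			return host_ipi_pending() ? 1 : -1;
	 *		case 0xa:		// hypervisor maintenance
	 *			*trap = BOOK3S_INTERRUPT_HMI;
	 *			return 1;
	 *		case 8:			// external interrupt
	 *			*trap = BOOK3S_INTERRUPT_EXTERNAL;
	 *			// 0, 1, -1 or -2; a 2 is mapped to
	 *			// BOOK3S_INTERRUPT_HV_RM_HARD above
	 *			return kvmppc_read_intr();
	 *		default:
	 *			return 1;	// host handles anything else
	 *		}
	 *	}
	 */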
/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr
/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr
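	/*
	 * Illustrative sketch (C, not built): both routines above enable
	 * the facilities in the MSR before touching FP/VMX/VSX registers,
	 * since those accesses would otherwise fault; helper names are
	 * stand-ins.
	 *
	 *	uint64_t msr = mfmsr() | MSR_FP;
	 *	if (cpu_has_feature_altivec())
	 *		msr |= MSR_VEC;
	 *	if (cpu_has_feature_vsx())
	 *		msr |= MSR_VSX;
	 *	mtmsrd(msr);
	 *	// now store_fp_state()/load_fp_state() and the VMX
	 *	// equivalents can run without taking facility faults
	 */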
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
_GLOBAL_TOC(kvmppc_save_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl.	r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 * In fact treclaim in fake suspend state doesn't modify
	 * XER so our fail cause is not relevant either.
	 */
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
_GLOBAL_TOC(kvmppc_restore_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
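	/*
	 * Illustrative sketch (C, not built) of the POWER9 TM-assist path of
	 * kvmppc_restore_tm_hv above; msr_ts() is a stand-in for extracting
	 * the two-bit transaction-state field from the guest MSR.
	 *
	 *	if (!cpu_has_p9_tm_hv_assist())
	 *		return __kvmppc_restore_tm();	// real trechkpt path
	 *	restore_tm_sprs();		// TFHAR/TFIAR/TEXASR, always
	 *	if (msr_ts(guest_msr) == 0)
	 *		return;			// TM not active in guest
	 *	set_texasr_fs();		// failure summary must be set
	 *	if (msr_ts(guest_msr) == 1)	// suspended
	 *		hstate->fake_suspend = 1;
	 *	else				// transactional
	 *		kvmhv_emulate_tm_rollback(vcpu);
	 */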
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	SAVE_4GPRS(9, r1)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	std	r9, GPR9(r1)
	std	r12, GPR12(r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
	mfctr	r4
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)	/* "regshere" marker */
	std	r3, STACK_FRAME_OVERHEAD-16(r1)
	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX0, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWRX1, r0
END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
	/* Clear hash and radix guest SLB, see guest_exit_short_path comment. */
	slbmte	r0, r0
	PPC_SLBIA(6)
	isync

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	isync
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	/*
	 * Turn on the MMU and jump to C code
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	li	r4, -1
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b
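	/*
	 * Illustrative sketch (C, not built): the li/rldimi pair above just
	 * forces the top two address bits on, converting the real-mode
	 * address of label 9 into its kernel linear-map equivalent before
	 * the rfid:
	 *
	 *	uint64_t virt = real_addr | 0xc000000000000000ULL;
	 */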
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state..  */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
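	/*
	 * Worked example (C, not built): TS is the two-bit field whose low
	 * bit is MSR_TS_S_LG (0b10 = transactional, 0b01 = suspended); the
	 * code above then amounts to:
	 *
	 *	unsigned int ts = (msr >> MSR_TS_S_LG) & 3;
	 *	msr = vcpu->intr_msr;	// base MSR for interrupt delivery
	 *	if (ts == 2)		// transactional at interrupt time...
	 *		ts = 1;		// ...is delivered as suspended
	 *	msr |= (uint64_t)ts << MSR_TS_S_LG;	// fold TS back in
	 */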
/*
 * Load up guest PMU state.  R3 points to the vcpu struct.
 */
_GLOBAL(kvmhv_load_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
	mr	r4, r3
	mflr	r0
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCRA(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER + 8(r4)
	ld	r7, VCPU_SIER + 16(r4)
	mtspr	SPRN_MMCR3, r5
	mtspr	SPRN_SIER2, r6
	mtspr	SPRN_SIER3, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 16(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCRS(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
	blr
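	/*
	 * Illustrative sketch (C, not built) of the ordering used above;
	 * the helpers are stand-ins:
	 *
	 *	mtspr_mmcr0(MMCR0_FC);	// freeze all counters first
	 *	isync();
	 *	load_pmcs(vcpu);	// always loaded, even if the guest is
	 *				// not using the PMU, so stale host
	 *				// counts cannot leak to the guest
	 *	load_pmu_sprs(vcpu);	// MMCR1/MMCRA/SIAR/SDAR, etc.
	 *	mtspr_mmcr0(vcpu->mmcr0);	// finally unfreeze as the
	 *					// guest had it
	 */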
/*
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
_GLOBAL(kvmhv_load_host_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
	mflr	r0
	lbz	r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, HSTATE_MMCR3(r13)
	ld	r6, HSTATE_SIER2(r13)
	ld	r7, HSTATE_SIER3(r13)
	mtspr	SPRN_MMCR3, r5
	mtspr	SPRN_SIER2, r6
	mtspr	SPRN_SIER3, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
23:	blr
/*
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
_GLOBAL(kvmhv_save_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
	mr	r9, r3
	mr	r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMA0] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	cmpwi	r8, 0		/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCRA(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_MMCR3
	mfspr	r6, SPRN_SIER2
	mfspr	r7, SPRN_SIER3
	std	r5, VCPU_MMCR + 24(r9)
	std	r6, VCPU_SIER + 8(r9)
	std	r7, VCPU_SIER + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCRS(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:	blr
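	/*
	 * Illustrative sketch (C, not built) of the POWER8 save order
	 * described in the big comment above: freeze through MMCR2 first so
	 * that a counter going negative after MMCR0 is read still raises a
	 * pending alert; helper names are stand-ins.
	 *
	 *	uint64_t saved_mmcr2 = mfspr_mmcr2();
	 *	mtspr_mmcr2(freeze_all_conditions());	// 9 bits per counter
	 *	isync();
	 *	uint64_t mmcr0 = mfspr_mmcr0();		// PMAO now trustworthy
	 *	mtspr_mmcr0(MMCR0_FC);			// then freeze via MMCR0
	 *	save_counters_and_sprs(vcpu, mmcr0, saved_mmcr2);
	 */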
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr
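	/*
	 * Illustrative sketch (C, not built): because writing MMCR0[PMAO]
	 * does not latch an interrupt on POWER8E, the routine above instead
	 * programs PMC6 one count below overflow, so the very next run-cycle
	 * event (C56RUN) overflows it and raises a real performance monitor
	 * alert; helper names are stand-ins.
	 *
	 *	mtspr_mmcr2(0);				// drop all freezes
	 *	mtspr_mmcr0(MMCR0_PMXE | MMCR0_FCECE |
	 *		    MMCR0_PMCjCE | MMCR0_C56RUN);
	 *	mtspr_pmc6(0x7fffffff);			// overflow imminent
	 */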
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr
/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	cmpdi	r6, 0
	beq	3f
	cmpd	r6, r3
	ble	4f
3:	std	r3, TAS_MIN(r5)
4:	ld	r6, TAS_MAX(r5)
	cmpd	r6, r3
	bge	5f
	std	r3, TAS_MAX(r5)
5:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif
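	/*
	 * Illustrative sketch (C, not built): the TAS_SEQCOUNT updates above
	 * form a writer-side seqlock around total/min/max, so a reader that
	 * sees an odd or changed sequence count retries; the struct is an
	 * illustrative mirror of the TAS_* offsets.
	 *
	 *	struct tb_accumulator {
	 *		uint64_t seqcount, total, min, max;
	 *	};
	 *
	 *	void accumulate(struct tb_accumulator *a, uint64_t dt)
	 *	{
	 *		a->seqcount++;		// odd: update in progress
	 *		lwsync();		// stand-in for the barrier
	 *		a->total += dt;
	 *		if (!a->min || dt < a->min)
	 *			a->min = dt;
	 *		if (dt > a->max)
	 *			a->max = dt;
	 *		lwsync();
	 *		a->seqcount++;		// even: stable again
	 *	}
	 */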