/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
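	/*
	 * Clearing MSR_RI marks the upcoming SRR0/SRR1 update and rfid
	 * as non-recoverable: a machine check taken in this window must
	 * not attempt to return via the repurposed save/restore
	 * registers.
	 */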
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)
	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8

13:	b	machine_check_fwnmi
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* Check the wake reason in SRR1 to see why we got here */
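	/*
	 * On POWER7 the wake reason is encoded in SRR1 bits 42:44; the
	 * rlwinm below rotates that field down into the low three bits,
	 * where the value 4 denotes an external interrupt.
	 */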
	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
	cmpwi	r3, 4			/* was it an external interrupt? */
	bne	kvm_novcpu_exit		/* if not, exit the guest */

	/* external interrupt - read and handle it */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r7,XICS_XIRR		/* if it was an external interrupt, */
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
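	/*
	 * lwzcix/stbcix/stwcix are cache-inhibited loads and stores:
	 * we are executing in real mode, so the XICS presentation
	 * controller registers are accessed through their physical
	 * addresses rather than through a cacheable mapping.
	 */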
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	28f			/* none there? */
	cmpwi	r9,XICS_IPI		/* was it an IPI? */
	stbcix	r0,r5,r6		/* clear IPI */
	stwcix	r8,r5,r7		/* EOI the interrupt */
	sync				/* order loading of vcpu after that */

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)

	/* if we have no vcpu to run, go back to sleep */

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */

28:	/* SRR1 said external but ICP said nope?? */

29:	/* External non-IPI interrupt to offline secondary thread? help?? */
	stw	r8,HSTATE_SAVED_XIRR(r13)

	/* Set HSTATE_DSCR(r13) to something sensible */
	LOAD_REG_ADDR(r6, dscr_default)
	std	r6, HSTATE_DSCR(r13)

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)

	/* Clear any pending IPI - we're an offline thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */

	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
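	/*
	 * rlwimi with a zero shift replaces only the PECE0/PECE1 bits
	 * of the LPCR image in r4 with the corresponding bits of r3,
	 * leaving the rest of LPCR intact; those bits select which
	 * events (external interrupt, decrementer) may wake the thread
	 * from nap.
	 */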
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)

/******************************************************************************
 *****************************************************************************/

	.global	kvmppc_hv_entry
kvmppc_hv_entry:

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
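	/*
	 * At this point r6 holds the byte offset, relative to the
	 * struct kvm base, of the doubleword containing this cpu's bit
	 * in kvm->arch.need_tlb_flush, and r7 holds the bit number
	 * within that doubleword, ready for the atomic test-and-clear
	 * below.
	 */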
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	li	r6,128			/* and flush the TLB */
	li	r7,0x800		/* IS field = 0b10 */
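	/*
	 * The flush loop (partially elided here) issues tlbiel once per
	 * TLB congruence class - 128 iterations - stepping the set
	 * index each time; the IS = 0b10 encoding invalidates by set,
	 * so together the iterations flush the entire TLB for this
	 * partition.
	 */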
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3,512			/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
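	/*
	 * The timebase on these machines ticks at 512 MHz, so 512 ticks
	 * is roughly one microsecond; if the hypervisor decrementer
	 * will expire that soon it is not worth completing guest entry.
	 */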
	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */

	/* drop the guest's tlbie_lock */

	/* Check if HDEC expires soon */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Enable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
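	/*
	 * rldimi inserts the low-order bit of r3 into r0 at the HDICE
	 * bit position while preserving the rest of the HID0 image; on
	 * PPC970 the hypervisor decrementer interrupt enable lives in
	 * HID0 rather than in LPCR.
	 */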
	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	stw	r5, LPPACA_YIELDCOUNT(r3)
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
BEGIN_FTR_SECTION_NESTED(89)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)

	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
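	/*
	 * This rldicl/rotldi pair rotates the MSR image left so that
	 * MSR_HV becomes the most significant bit, clears it via the
	 * rldicl mask, then rotates the value back into place - a
	 * branch-free way of clearing a single high bit.
	 */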
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	lis	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
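	/*
	 * li can only materialize a 16-bit immediate, so MSR_SF (bit
	 * 63) cannot be loaded directly; the value is built shifted
	 * left one bit with a 1 in the bottom, then rotated right one
	 * bit afterwards to yield MSR_SF | MSR_ME.
	 */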
	li	r0,BOOK3S_INTERRUPT_DECREMENTER

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6

	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

/******************************************************************************
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * R12 = interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_HOST_R2(r13)
	beq	kvmppc_interrupt_pr
#endif

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
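	/*
	 * The first-level handlers set bit 1 of the vector number in
	 * r12 for interrupts that use HSRR0/HSRR1, so this test decides
	 * whether the guest PC and MSR must be read from the hypervisor
	 * save/restore registers instead of SRR0/SRR1.
	 */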
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)
	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi.  If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bgt	ext_interrupt_to_host
	/* Alright, looks like an IPI for the guest, we need to set MER */
	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	bge	ext_interrupt_to_host

	/* See if there is a pending interrupt for the guest */
	ld	r0, VCPU_PENDING_EXC(r9)
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldicl.	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
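	/*
	 * rldicl. isolates the EXTERNAL_LEVEL bit of pending_exceptions
	 * in the low bit of r0 (setting cr0 as a side effect), and
	 * rldimi copies it into the LPCR image at the MER position, so
	 * the guest will see a mediated external interrupt once it
	 * enables MSR_EE.
	 */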
	/* And if the guest EE is set, we can deliver immediately, else
	 * we return to the guest with MER set
	 */
	andi.	r0, r11, MSR_EE
	li	r10, BOOK3S_INTERRUPT_EXTERNAL
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)

	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	stw	r3, LPPACA_YIELDCOUNT(r8)
	stb	r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* On P7, clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
hdec_soon:			/* r12 = trap, r13 = paca */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100		/* Are we the first here? */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	stbcix	r0,r7,r8		/* trigger the IPI */
	addi	r6,r6,PACA_SIZE

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)
	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	stw	r0,0(r3)		/* drop native_tlbie_lock */
	lis	r8,0x7fff		/* MAX_INT@h */

	/* Disable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	mfspr	r6, SPRN_HDSISR

	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
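	/*
	 * slbfee. looks the faulting effective address up in the SLB so
	 * that the matching entry (segment size and so on) can be
	 * passed to kvmppc_hpte_hv_fault below; if no entry matches,
	 * there is no translation and the fault is reflected straight
	 * back to the guest.
	 */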
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */

fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	b	fast_interrupt_c_return
3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9			/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	.kvmppc_h_cede - hcall_real_table
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_dabr)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4

_GLOBAL(kvmppc_h_cede)
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * cede up to the host instead.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r3,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* Check the wake reason in SRR1 to see why we got here */
	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
	cmpwi	r3, 4			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	ld	r11, VCPU_MSR(r9)
	beq	do_ext_interrupt	/* if so */

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback
	/* cede when we have already been prodded */
kvm_cede_prodded:
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	b	fast_interrupt_c_return
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */

	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	rlwinm.	r3, r0, 0, 0xffffff
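	/*
	 * XIRR holds the CPPR in its top 8 bits and the 24-bit
	 * interrupt source (XISR) below it; masking to the low 24 bits
	 * and testing for zero therefore checks whether any interrupt
	 * is pending at all.
	 */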
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */

42:	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)

43:	/* We raced with the host, we need to resend that IPI, bummer */
	stbcix	r0, r6, r8		/* set the IPI */

/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)

/*
 * Load up FP, VMX and VSX registers
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr: