/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after the MMU is eventually re-enabled
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
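	/*
	 * With MSR_RI clear, any interrupt taken between here and the
	 * real-mode entry point is treated as unrecoverable, which is
	 * what we want while SRR0/SRR1 hold the transition state.
	 */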
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)
	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
13:	b	machine_check_fwnmi
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	stb	r3, HSTATE_HWTHREAD_REQ(r13)
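	/*
	 * HSTATE_NAPPING records why this thread went to nap, so that
	 * the wakeup check in kvm_start_guest can send us to
	 * kvm_novcpu_wakeup rather than the cede wakeup path.
	 */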
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)

	/* if we have no vcpu to run, go back to sleep */

	/* Set HSTATE_DSCR(r13) to something sensible */
	LOAD_REG_ADDR(r6, dscr_default)
	std	r6, HSTATE_DSCR(r13)
	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */

	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
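	/*
	 * Leave only the PECE0/PECE1 wake-enable bits set in LPCR here,
	 * so that just external and decrementer interrupts can bring
	 * the thread out of nap.
	 */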
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
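	/*
	 * The dummy store and reload of HSTATE_SCRATCH0 make sure the
	 * preceding updates have been performed before the thread
	 * actually enters nap.
	 */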
/******************************************************************************
 *****************************************************************************/
	.global	kvmppc_hv_entry
kvmppc_hv_entry:

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
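	/*
	 * The low byte of vcore->entry_exit_count counts threads that
	 * have entered the guest, and the 0xff00 byte counts threads
	 * starting to exit, so a value >= 0x100 means some thread is
	 * already on its way out.
	 */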
	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
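	/*
	 * The LPID stays at the reserved value while SDR1 is updated,
	 * so the hardware never sees the guest's real LPID paired with
	 * a half-switched page table; the real LPID goes in afterwards.
	 */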
	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
	li	r6,512			/* POWER8 has 512 sets */
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	li	r7,0x800		/* IS field = 0b10 */
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3,512			/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */

	/* drop the guest's tlbie_lock */

	/* Check if HDEC expires soon */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Enable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE
	/* Increment yield count if they have a VPA */
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	stw	r5, LPPACA_YIELDCOUNT(r3)
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
BEGIN_FTR_SECTION_NESTED(89)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
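	/*
	 * The counters stay frozen while the guest's PMU state is
	 * swapped in; the guest's own MMCR0 (with its FC and interrupt
	 * enable bits) is written only once everything else is in place.
	 */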
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Skip next section on POWER7 or PPC970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TEXASR, r7
#endif

	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r5, VCPU_TCSCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	/* Restore state of CTRL run bit; assume 1 on entry */

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
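	/*
	 * The rotate/mask/rotate pair above clears just MSR_HV: the
	 * first rotate moves the HV bit to the top of the register,
	 * the mask clears it, and the second rotate restores the
	 * original bit layout.
	 */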
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
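	/*
	 * With LPCR_MER set, the hardware presents a mediated external
	 * interrupt to the guest as soon as the guest enables MSR_EE,
	 * so a pending level-triggered interrupt is not lost while the
	 * guest runs with interrupts disabled.
	 */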
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER

12:	mtspr	SPRN_SRR0, r10
	ld	r11, VCPU_INTR_MSR(r4)

	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
/******************************************************************************
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * R12 = interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_HOST_R2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
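	/*
	 * The first-level handlers set bit 1 of the vector number in
	 * r12 for interrupts that use HSRR0/1, which is what the
	 * andi. above tests for.
	 */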
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)
	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)
	/* these are volatile across C function calls */

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
	/* External interrupt? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now */
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host; if so, head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	b	deliver_guest_interrupt
ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)

	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)
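	/*
	 * Only entries with SLB_ESID_V set are copied out, so
	 * VCPU_SLB_MAX ends up as the number of valid entries saved,
	 * not the full size of the SLB.
	 */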
	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
	std	r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
#endif
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	stw	r3, LPPACA_YIELDCOUNT(r8)
	stb	r3, VCPU_VPA_DIRTY(r9)
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
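	/*
	 * Freezing the counters before reading them means the values
	 * saved below form a consistent snapshot of the guest's PMU
	 * state.
	 */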
	mfspr	r6, SPRN_MMCRA
	/* On P7, clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mfspr	r4, SPRN_MMCR2
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r4, VCPU_MMCR + 24(r9)
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
hdec_soon:			/* r12 = trap, r13 = paca */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	stbcix	r0,r7,r8		/* trigger the IPI */
	addi	r6,r6,PACA_SIZE
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
16:	ld	r8,KVM_HOST_LPCR(r4)
	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop guest tlbie_lock */
	/* invalidate the whole TLB */

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	stw	r0,0(r3)		/* drop native_tlbie_lock */
	lis	r8,0x7fff		/* MAX_INT@h */

	/* Disable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
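	/*
	 * kvmppc_hpte_hv_fault returns 0 to retry the instruction,
	 * -1 to pass the fault up to the kernel, and -2 when MMIO
	 * emulation needs the instruction word; any other value is
	 * the DSISR to use when reflecting the fault to the guest.
	 */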
	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	ld	r11, VCPU_INTR_MSR(r9)
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	ld	r11, VCPU_INTR_MSR(r9)
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	LOAD_REG_ADDR(r4, hcall_real_table)
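	/*
	 * hcall numbers are multiples of 4, so the hcall number itself
	 * indexes the table of 32-bit offsets below; a zero entry
	 * means there is no real-mode handler for that hcall and we
	 * fall back to the virtual-mode path.
	 */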
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	ld	r11, VCPU_INTR_MSR(r9)
	/* We've attempted a real mode hcall, but the handler punted it
	 * back to userspace.  We need to restore some clobbered
	 * volatiles before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	.kvmppc_h_cede - hcall_real_table
	.long	.kvmppc_h_bulk_remove - hcall_real_table
	.long	.kvmppc_h_set_xdabr - hcall_real_table
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER
_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
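	/*
	 * The two rlwimi ops above copy DABR's read/write enable and
	 * translation bits into the matching DAWRX fields, so an
	 * old-style DABR request programs an equivalent DAWR/DAWRX
	 * watchpoint.
	 */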
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5
_GLOBAL(kvmppc_h_cede)
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * cede up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */
	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
	/*
	 * Take a nap until a decrementer, external or doorbell
	 * interrupt occurs, with PECE1, PECE0 and PECEDP set in LPCR
	 */
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)
	/* load up FP state */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */
	/* cede when already previously prodded case */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
	b	hcall_real_fallback
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_INTR_MSR(r9)
	b	fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 */
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	rlwinm.	r3, r0, 0, 0xffffff
	beq	1f			/* if nothing pending in the ICP */
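	/*
	 * The low 24 bits of the XIRR are the XISR, which identifies
	 * the interrupt source; zero means nothing is pending.  The
	 * full XIRR value stays in r0 so it can be written back later
	 * to EOI the interrupt.
	 */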
	/*
	 * We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */

42:	/* It's not an IPI and it's for the host; stash it in the PACA
	 * before exit, where it will be picked up by the host ICP
	 * driver */
	stw	r0, HSTATE_SAVED_XIRR(r13)

43:	/* We raced with the host; we need to resend that IPI, bummer */
	stbcix	r0, r6, r8		/* set the IPI */
/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)	/* use r31; r3 no longer points at the vcpu */
/*
 * Load up FP, VMX and VSX registers
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)	/* use r31; r4 may have been clobbered */
	mtspr	SPRN_VRSAVE,r7
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr: