KVM: PPC: Clean up redundant 'kvm_run' parameters
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 607a9b9..25a3679 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -55,8 +55,7 @@
  ****************************************************************************/
 
 /* Registers:
- *  r3: kvm_run pointer
- *  r4: vcpu pointer
+ *  r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 
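With the unused kvm_run pointer dropped, vcpu moves from the second argument register (r4) to the first (r3), per the PPC64 ELF calling convention. On the C side the entry point's prototype shrinks accordingly; a minimal sketch, with the old signature inferred from the deleted register comment and the exact declaration site assumed:

	/* Before: r3 = kvm_run, r4 = vcpu (per the old comment above) */
	int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* After: r3 = vcpu; the run struct stays reachable as vcpu->run */
	int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
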
@@ -68,8 +67,8 @@ kvm_start_entry:
        /* Save host state to the stack */
        PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
-       /* Save r3 (kvm_run) and r4 (vcpu) */
-       SAVE_2GPRS(3, r1)
+       /* Save r3 (vcpu) */
+       SAVE_GPR(3, r1)
 
        /* Save non-volatile registers (r14 - r31) */
        SAVE_NVGPRS(r1)
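SAVE_2GPRS(n, base) is simply two consecutive SAVE_GPR invocations, so with only one live argument the single-register form is the natural replacement. Paraphrased from arch/powerpc/include/asm/ppc_asm.h (exact definitions vary by tree):

	#define SAVE_GPR(n, base)	PPC_STL n, GPR0+SZL*(n)(base)	/* spill one GPR to the frame */
	#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)

SAVE_GPR(3, r1) therefore stores only r3 into the switch frame, and the matching REST_GPR(3, r1) calls below reload it.
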
@@ -82,47 +81,46 @@ kvm_start_entry:
        PPC_STL r0, _LINK(r1)
 
        /* Load non-volatile guest state from the vcpu */
-       VCPU_LOAD_NVGPRS(r4)
+       VCPU_LOAD_NVGPRS(r3)
 
 kvm_start_lightweight:
        /* Copy registers into shadow vcpu so we can access them in real mode */
-       mr      r3, r4
        bl      FUNC(kvmppc_copy_to_svcpu)
        nop
-       REST_GPR(4, r1)
+       REST_GPR(3, r1)
 
 #ifdef CONFIG_PPC_BOOK3S_64
        /* Get the dcbz32 flag */
-       PPC_LL  r3, VCPU_HFLAGS(r4)
-       rldicl  r3, r3, 0, 63           /* r3 &= 1 */
-       stb     r3, HSTATE_RESTORE_HID5(r13)
+       PPC_LL  r0, VCPU_HFLAGS(r3)
+       rldicl  r0, r0, 0, 63           /* r0 &= 1 */
+       stb     r0, HSTATE_RESTORE_HID5(r13)
 
        /* Load up guest SPRG3 value, since it's user readable */
-       lbz     r3, VCPU_SHAREDBE(r4)
-       cmpwi   r3, 0
-       ld      r5, VCPU_SHARED(r4)
+       lbz     r4, VCPU_SHAREDBE(r3)
+       cmpwi   r4, 0
+       ld      r5, VCPU_SHARED(r3)
        beq     sprg3_little_endian
 sprg3_big_endian:
 #ifdef __BIG_ENDIAN__
-       ld      r3, VCPU_SHARED_SPRG3(r5)
+       ld      r4, VCPU_SHARED_SPRG3(r5)
 #else
        addi    r5, r5, VCPU_SHARED_SPRG3
-       ldbrx   r3, 0, r5
+       ldbrx   r4, 0, r5
 #endif
        b       after_sprg3_load
 sprg3_little_endian:
 #ifdef __LITTLE_ENDIAN__
-       ld      r3, VCPU_SHARED_SPRG3(r5)
+       ld      r4, VCPU_SHARED_SPRG3(r5)
 #else
        addi    r5, r5, VCPU_SHARED_SPRG3
-       ldbrx   r3, 0, r5
+       ldbrx   r4, 0, r5
 #endif
 
 after_sprg3_load:
-       mtspr   SPRN_SPRG3, r3
+       mtspr   SPRN_SPRG3, r4
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
-       PPC_LL  r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
+       PPC_LL  r4, VCPU_SHADOW_MSR(r3) /* get shadow_msr */
 
        /* Jump to segment patching handler and into our guest */
        bl      FUNC(kvmppc_entry_trampoline)
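Note that the SPRG3 sequence now stages the value in r4 and r5 so that r3, which again holds the vcpu pointer, stays live across it. A hypothetical C rendering of that endian-aware load (field access and casts loose, not the kernel's actual helper):

	u64 sprg3 = vcpu->arch.shared->sprg3;		/* stored guest-endian */

	if (vcpu->arch.shared_big_endian)		/* VCPU_SHAREDBE */
		sprg3 = be64_to_cpu((__force __be64)sprg3);	/* ld on BE hosts, ldbrx on LE */
	else
		sprg3 = le64_to_cpu((__force __le64)sprg3);	/* ld on LE hosts, ldbrx on BE */

	mtspr(SPRN_SPRG3, sprg3);			/* user-readable SPR */
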
@@ -146,7 +144,7 @@ after_sprg3_load:
         *
         */
 
-       PPC_LL  r3, GPR4(r1)            /* vcpu pointer */
+       PPC_LL  r3, GPR3(r1)            /* vcpu pointer */
 
        /*
         * kvmppc_copy_from_svcpu can clobber volatile registers, save
@@ -169,7 +167,7 @@ after_sprg3_load:
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
        /* R7 = vcpu */
-       PPC_LL  r7, GPR4(r1)
+       PPC_LL  r7, GPR3(r1)
 
        PPC_STL r14, VCPU_GPR(R14)(r7)
        PPC_STL r15, VCPU_GPR(R15)(r7)
@@ -190,11 +188,11 @@ after_sprg3_load:
        PPC_STL r30, VCPU_GPR(R30)(r7)
        PPC_STL r31, VCPU_GPR(R31)(r7)
 
-       /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-       lwz     r5, VCPU_TRAP(r7)
+       /* Pass the exit number as 2nd argument to kvmppc_handle_exit_pr */
+       lwz     r4, VCPU_TRAP(r7)
 
-       /* Restore r3 (kvm_run) and r4 (vcpu) */
-       REST_2GPRS(3, r1)
+       /* Restore r3 (vcpu) */
+       REST_GPR(3, r1)
        bl      FUNC(kvmppc_handle_exit_pr)
 
        /* If RESUME_GUEST, get back in the loop */
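The same re-parenting shows up on the exit path: the trap number now rides in r4 as the second C argument. A sketch of the corresponding prototype change, with the old form inferred from the deleted 3rd-argument comment:

	/* Before: */
	int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int exit_nr);

	/* After: run drops out, reachable as vcpu->run */
	int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr);
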
@@ -223,11 +221,11 @@ kvm_loop_heavyweight:
        PPC_LL  r4, _LINK(r1)
        PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
 
-       /* Load vcpu and cpu_run */
-       REST_2GPRS(3, r1)
+       /* Load vcpu */
+       REST_GPR(3, r1)
 
        /* Load non-volatile guest state from the vcpu */
-       VCPU_LOAD_NVGPRS(r4)
+       VCPU_LOAD_NVGPRS(r3)
 
        /* Jump back into the beginning of this function */
        b       kvm_start_lightweight
@@ -235,7 +233,7 @@ kvm_loop_heavyweight:
 kvm_loop_lightweight:
 
        /* We'll need the vcpu pointer */
-       REST_GPR(4, r1)
+       REST_GPR(3, r1)
 
        /* Jump back into the beginning of this function */
        b       kvm_start_lightweight
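
Putting the pieces together, the assembly implements a guest-entry loop that C callers now invoke with a single argument. A loose, hypothetical C shape of the control flow (enter_guest() is invented for illustration, and the lightweight/heavyweight split over whether NVGPRs are reloaded is folded into one loop):

	int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu)	/* was (run, vcpu) */
	{
		int r;

		do {
			enter_guest(vcpu);		/* kvm_start_lightweight / trampoline */
			r = kvmppc_handle_exit_pr(vcpu, vcpu->arch.trap);	/* VCPU_TRAP */
		} while (r == RESUME_GUEST || r == RESUME_GUEST_NV);	/* kvm_loop_* */

		return r;
	}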