powerpc: Fix transactional FP/VMX/VSX unavailable handlers
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 62c3dd8..33cd7a0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -285,6 +285,21 @@ void system_reset_exception(struct pt_regs *regs)
 
        /* What should we do here? We could issue a shutdown or hard reset. */
 }
+
+/*
+ * This function is called in real mode. Strictly no printk()s, please.
+ *
+ * regs->nip and regs->msr contain srr0 and srr1.
+ */
+long machine_check_early(struct pt_regs *regs)
+{
+       long handled = 0;
+
+       if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
+               handled = cur_cpu_spec->machine_check_early(regs);
+       return handled;
+}
+
 #endif
 
 /*
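
For context (not part of this patch): a minimal sketch of how a platform might supply the early handler that the new hook above dispatches through cur_cpu_spec->machine_check_early. Only that callback and its long (*)(struct pt_regs *) shape are taken from the code above; the example_* names are hypothetical, and a real cpu_spec entry carries many more fields.

/* Illustrative sketch only -- the example_* names are made up. */
static long example_machine_check_early_realmode(struct pt_regs *regs)
{
        /*
         * Runs in real mode, so no printk() and no virtual-mode
         * accesses here.  Return nonzero once the error has been
         * handled, zero to fall back to the normal path.
         */
        return 0;
}

static struct cpu_spec example_cpu_spec = {
        .cpu_name               = "EXAMPLE",
        .machine_check_early    = example_machine_check_early_realmode,
};
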
@@ -1384,7 +1399,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
 
        TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
                 regs->nip, regs->msr);
-       tm_enable();
 
         /* We can only have got here if the task started using FP after
          * beginning the transaction.  So, the transactional regs are just a
@@ -1393,8 +1407,7 @@ void fp_unavailable_tm(struct pt_regs *regs)
          * transaction, and probably retry but now with FP enabled.  So the
          * checkpointed FP registers need to be loaded.
         */
-       tm_reclaim(&current->thread, current->thread.regs->msr,
-                  TM_CAUSE_FAC_UNAV);
+       tm_reclaim_current(TM_CAUSE_FAC_UNAV);
        /* Reclaim didn't save out any FPRs to transact_fprs. */
 
        /* Enable FP for the task: */
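
For reference (not part of the patch): tm_reclaim_current() used above is assumed to wrap the tm_enable()/tm_reclaim() pair that the old code open-coded, roughly as below; the real helper lives elsewhere in the powerpc TM/process code and may differ in detail.

/* Rough equivalent, inferred from the sequence it replaces above. */
static inline void tm_reclaim_current_sketch(uint8_t cause)
{
        tm_enable();
        tm_reclaim(&current->thread, current->thread.regs->msr, cause);
}
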
@@ -1403,11 +1416,19 @@ void fp_unavailable_tm(struct pt_regs *regs)
        /* This loads and recheckpoints the FP registers from
         * thread.fpr[].  They will remain in registers after the
         * checkpoint so we don't need to reload them after.
+        * If VMX is in use, the VRs now hold checkpointed values,
+        * so we don't want to load the VRs from the thread_struct.
         */
-       tm_recheckpoint(&current->thread, regs->msr);
+       tm_recheckpoint(&current->thread, MSR_FP);
+
+       /* If VMX is in use, get the transactional values back */
+       if (regs->msr & MSR_VEC) {
+               do_load_up_transact_altivec(&current->thread);
+               /* At this point all the VSX state is loaded, so enable it */
+               regs->msr |= MSR_VSX;
+       }
 }
 
-#ifdef CONFIG_ALTIVEC
 void altivec_unavailable_tm(struct pt_regs *regs)
 {
        /* See the comments in fp_unavailable_tm().  This function operates
@@ -1417,18 +1438,21 @@ void altivec_unavailable_tm(struct pt_regs *regs)
        TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
                 "MSR=%lx\n",
                 regs->nip, regs->msr);
-       tm_enable();
-       tm_reclaim(&current->thread, current->thread.regs->msr,
-                  TM_CAUSE_FAC_UNAV);
+       tm_reclaim_current(TM_CAUSE_FAC_UNAV);
        regs->msr |= MSR_VEC;
-       tm_recheckpoint(&current->thread, regs->msr);
+       tm_recheckpoint(&current->thread, MSR_VEC);
        current->thread.used_vr = 1;
+
+       if (regs->msr & MSR_FP) {
+               do_load_up_transact_fpu(&current->thread);
+               regs->msr |= MSR_VSX;
+       }
 }
-#endif
 
-#ifdef CONFIG_VSX
 void vsx_unavailable_tm(struct pt_regs *regs)
 {
+       unsigned long orig_msr = regs->msr;
+
        /* See the comments in fp_unavailable_tm().  This works similarly,
         * though we're loading both FP and VEC registers in here.
         *
@@ -1440,18 +1464,30 @@ void vsx_unavailable_tm(struct pt_regs *regs)
                 "MSR=%lx\n",
                 regs->nip, regs->msr);
 
-       tm_enable();
+       current->thread.used_vsr = 1;
+
+       /* If FP and VMX are already loaded, we have all the state we need */
+       if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
+               regs->msr |= MSR_VSX;
+               return;
+       }
+
        /* This reclaims FP and/or VR regs if they're already enabled */
-       tm_reclaim(&current->thread, current->thread.regs->msr,
-                  TM_CAUSE_FAC_UNAV);
+       tm_reclaim_current(TM_CAUSE_FAC_UNAV);
 
        regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
                MSR_VSX;
-       /* This loads & recheckpoints FP and VRs. */
-       tm_recheckpoint(&current->thread, regs->msr);
-       current->thread.used_vsr = 1;
+
+       /* This loads & recheckpoints FP and VRs; but we have
+        * to be sure not to overwrite previously-valid state.
+        */
+       tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
+
+       if (orig_msr & MSR_FP)
+               do_load_up_transact_fpu(&current->thread);
+       if (orig_msr & MSR_VEC)
+               do_load_up_transact_altivec(&current->thread);
 }
-#endif
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 void performance_monitor_exception(struct pt_regs *regs)
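
Not part of the patch, but to make the MSR bookkeeping in vsx_unavailable_tm() explicit: only facility state that was not already live at trap time is recheckpointed; anything that was live keeps its transactional values, which are reloaded afterwards via do_load_up_transact_fpu()/do_load_up_transact_altivec(). A sketch of the mask computation, using a hypothetical helper name:

/* Hypothetical helper mirroring the arithmetic in vsx_unavailable_tm(). */
static inline unsigned long vsx_tm_recheckpoint_mask(unsigned long orig_msr,
                                                     unsigned long fpexc_mode)
{
        unsigned long new_msr = MSR_VEC | MSR_FP | fpexc_mode | MSR_VSX;

        /*
         * Example: if only MSR_FP was live, the mask is MSR_VEC | MSR_VSX
         * (plus any fpexc bits not already set); the transactional FP
         * state is then reloaded separately by do_load_up_transact_fpu().
         */
        return new_msr & ~orig_msr;
}
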
@@ -1529,7 +1565,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
         * back on or not.
         */
        if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
-           current->thread.debug.dbcr1))
+                              current->thread.debug.dbcr1))
                regs->msr |= MSR_DE;
        else
                /* Make sure the IDM flag is off */