powerpc/64s: Tidy machine check SLB logging
Author:     Nicholas Piggin <npiggin@gmail.com>
AuthorDate: Sat, 28 Nov 2020 07:07:28 +0000 (17:07 +1000)
Commit:     Michael Ellerman <mpe@ellerman.id.au>
CommitDate: Thu, 3 Dec 2020 14:01:23 +0000 (01:01 +1100)
Since ISA v3.0, the SLB no longer uses the slb_cache, and stab_rr is no
longer correlated with SLB allocation, so only dump those fields on
pre-3.0 CPUs.

While here, improve some alignments and reduce whitespace.
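
For reference, a rough sketch of the resulting dump structure (not the
exact patched code; see slb_dump_contents() in the diff below for the
real thing). Entries are always printed with a VALID/NOT VALID tag, and
stab_rr plus the slb_cache are only reported on CPUs without
CPU_FTR_ARCH_300, i.e. pre-ISA v3.0:

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		/* Print every non-empty entry, flagging validity inline */
		pr_err("%02d %016lx %016lx %s\n", i, e, v,
				(e & SLB_ESID_V) ? "VALID" : "NOT VALID");
		/* ... decode 1T/256M ESID/VSID as in the diff ... */
	}

	if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* RR index and slb_cache only matter before ISA v3.0 */
		pr_err("SLB RR allocator index %d\n", get_paca()->stab_rr);
		/* ... dump the slb_cache entries as in the diff ... */
	}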

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201128070728.825934-9-npiggin@gmail.com
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index c30fcbf..6d720c1 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -255,7 +255,6 @@ void slb_dump_contents(struct slb_entry *slb_ptr)
                return;
 
        pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
-       pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);
 
        for (i = 0; i < mmu_slb_size; i++) {
                e = slb_ptr->esid;
@@ -265,34 +264,38 @@ void slb_dump_contents(struct slb_entry *slb_ptr)
                if (!e && !v)
                        continue;
 
-               pr_err("%02d %016lx %016lx\n", i, e, v);
+               pr_err("%02d %016lx %016lx %s\n", i, e, v,
+                               (e & SLB_ESID_V) ? "VALID" : "NOT VALID");
 
-               if (!(e & SLB_ESID_V)) {
-                       pr_err("\n");
+               if (!(e & SLB_ESID_V))
                        continue;
-               }
+
                llp = v & SLB_VSID_LLP;
                if (v & SLB_VSID_B_1T) {
-                       pr_err("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx\n",
+                       pr_err("     1T ESID=%9lx VSID=%13lx LLP:%3lx\n",
                               GET_ESID_1T(e),
                               (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
                } else {
-                       pr_err(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
+                       pr_err("   256M ESID=%9lx VSID=%13lx LLP:%3lx\n",
                               GET_ESID(e),
                               (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
                }
        }
-       pr_err("----------------------------------\n");
-
-       /* Dump slb cache entires as well. */
-       pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
-       pr_err("Valid SLB cache entries:\n");
-       n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
-       for (i = 0; i < n; i++)
-               pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
-       pr_err("Rest of SLB cache entries:\n");
-       for (i = n; i < SLB_CACHE_ENTRIES; i++)
-               pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
+
+       if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
+               /* RR is not so useful as it's often not used for allocation */
+               pr_err("SLB RR allocator index %d\n", get_paca()->stab_rr);
+
+               /* Dump slb cache entries as well. */
+               pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
+               pr_err("Valid SLB cache entries:\n");
+               n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
+               for (i = 0; i < n; i++)
+                       pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
+               pr_err("Rest of SLB cache entries:\n");
+               for (i = n; i < SLB_CACHE_ENTRIES; i++)
+                       pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
+       }
 }
 
 void slb_vmalloc_update(void)