powerpc: allow pte_offset_map[_lock]() to fail
author: Hugh Dickins <hughd@google.com>
Thu, 8 Jun 2023 19:23:35 +0000 (12:23 -0700)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 19 Jun 2023 23:19:08 +0000 (16:19 -0700)
In rare transient cases, not yet made possible, pte_offset_map() and
pte_offset_map_lock() may not find a page table: handle appropriately.
Balance successful pte_offset_map() with pte_unmap() where omitted.

Link: https://lkml.kernel.org/r/54c8b578-ca9-a0f-bfd2-d72976f8d73a@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: John David Anglin <dave.anglin@bell.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/powerpc/mm/book3s64/hash_tlb.c
arch/powerpc/mm/book3s64/subpage_prot.c
arch/powerpc/xmon/xmon.c

index a64ea0a..21fcad9 100644 (file)
@@ -239,12 +239,16 @@ void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        start_pte = pte_offset_map(pmd, addr);
+       if (!start_pte)
+               goto out;
        for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
                unsigned long pteval = pte_val(*pte);
                if (pteval & H_PAGE_HASHPTE)
                        hpte_need_flush(mm, addr, pte, pteval, 0);
                addr += PAGE_SIZE;
        }
+       pte_unmap(start_pte);
+out:
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
 }
index b75a9fb..0dc8555 100644 (file)
@@ -71,6 +71,8 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
        if (pmd_none(*pmd))
                return;
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       if (!pte)
+               return;
        arch_enter_lazy_mmu_mode();
        for (; npages > 0; --npages) {
                pte_update(mm, addr, pte, 0, 0, 0);
index 728d3c2..69447bd 100644 (file)
@@ -3376,12 +3376,15 @@ static void show_pte(unsigned long addr)
        printf("pmdp @ 0x%px = 0x%016lx\n", pmdp, pmd_val(*pmdp));
 
        ptep = pte_offset_map(pmdp, addr);
-       if (pte_none(*ptep)) {
+       if (!ptep || pte_none(*ptep)) {
+               if (ptep)
+                       pte_unmap(ptep);
                printf("no valid PTE\n");
                return;
        }
 
        format_pte(ptep, pte_val(*ptep));
+       pte_unmap(ptep);
 
        sync();
        __delay(200);