powerpc/64s/hash: Fix hash_preload running with interrupts enabled
authorNicholas Piggin <npiggin@gmail.com>
Mon, 27 Jul 2020 06:09:47 +0000 (16:09 +1000)
committerMichael Ellerman <mpe@ellerman.id.au>
Mon, 27 Jul 2020 07:02:09 +0000 (17:02 +1000)
Commit 2f92447f9f96 ("powerpc/book3s64/hash: Use the pte_t address from the
caller") removed the local_irq_disable from hash_preload, but it was
required for more than just the page table walk: the hash pte busy bit is
effectively a lock which may be taken in interrupt context, and the local
update flag test must not be preempted before it's used.

This solves apparent lockups with perf interrupting __hash_page_64K. If
get_perf_callchain then also takes a hash fault on the same page while it
is already locked, it will loop forever taking hash faults, which looks like
this:

  cpu 0x49e: Vector: 100 (System Reset) at [c00000001a4f7d70]
      pc: c000000000072dc8: hash_page_mm+0x8/0x800
      lr: c00000000000c5a4: do_hash_page+0x24/0x38
      sp: c0002ac1cc69ac70
     msr: 8000000000081033
    current = 0xc0002ac1cc602e00
    paca    = 0xc00000001de1f280   irqmask: 0x03   irq_happened: 0x01
      pid   = 20118, comm = pread2_processe
  Linux version 5.8.0-rc6-00345-g1fad14f18bc6
  49e:mon> t
  [c0002ac1cc69ac70] c00000000000c5a4 do_hash_page+0x24/0x38 (unreliable)
  --- Exception: 300 (Data Access) at c00000000008fa60 __copy_tofrom_user_power7+0x20c/0x7ac
  [link register   ] c000000000335d10 copy_from_user_nofault+0xf0/0x150
  [c0002ac1cc69af70] c00032bf9fa3c880 (unreliable)
  [c0002ac1cc69afa0] c000000000109df0 read_user_stack_64+0x70/0xf0
  [c0002ac1cc69afd0] c000000000109fcc perf_callchain_user_64+0x15c/0x410
  [c0002ac1cc69b060] c000000000109c00 perf_callchain_user+0x20/0x40
  [c0002ac1cc69b080] c00000000031c6cc get_perf_callchain+0x25c/0x360
  [c0002ac1cc69b120] c000000000316b50 perf_callchain+0x70/0xa0
  [c0002ac1cc69b140] c000000000316ddc perf_prepare_sample+0x25c/0x790
  [c0002ac1cc69b1a0] c000000000317350 perf_event_output_forward+0x40/0xb0
  [c0002ac1cc69b220] c000000000306138 __perf_event_overflow+0x88/0x1a0
  [c0002ac1cc69b270] c00000000010cf70 record_and_restart+0x230/0x750
  [c0002ac1cc69b620] c00000000010d69c perf_event_interrupt+0x20c/0x510
  [c0002ac1cc69b730] c000000000027d9c performance_monitor_exception+0x4c/0x60
  [c0002ac1cc69b750] c00000000000b2f8 performance_monitor_common_virt+0x1b8/0x1c0
  --- Exception: f00 (Performance Monitor) at c0000000000cb5b0 pSeries_lpar_hpte_insert+0x0/0x160
  [link register   ] c0000000000846f0 __hash_page_64K+0x210/0x540
  [c0002ac1cc69ba50] 0000000000000000 (unreliable)
  [c0002ac1cc69bb00] c000000000073ae0 update_mmu_cache+0x390/0x3a0
  [c0002ac1cc69bb70] c00000000037f024 wp_page_copy+0x364/0xce0
  [c0002ac1cc69bc20] c00000000038272c do_wp_page+0xdc/0xa60
  [c0002ac1cc69bc70] c0000000003857bc handle_mm_fault+0xb9c/0x1b60
  [c0002ac1cc69bd50] c00000000006c434 __do_page_fault+0x314/0xc90
  [c0002ac1cc69be20] c00000000000c5c8 handle_page_fault+0x10/0x2c
  --- Exception: 300 (Data Access) at 00007fff8c861fe8
  SP (7ffff6b19660) is in userspace

Fixes: 2f92447f9f96 ("powerpc/book3s64/hash: Use the pte_t address from the caller")
Reported-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Reported-by: Anton Blanchard <anton@ozlabs.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200727060947.10060-1-npiggin@gmail.com
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/perf/core-book3s.c

index 0fc8bad..446e54c 100644 (file)
@@ -3072,10 +3072,18 @@ do_hash_page:
        ori     r0,r0,DSISR_BAD_FAULT_64S@l
        and.    r0,r5,r0                /* weird error? */
        bne-    handle_page_fault       /* if not, try to insert a HPTE */
+
+       /*
+        * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+        * don't call hash_page, just fail the fault. This is required to
+        * prevent re-entrancy problems in the hash code, namely perf
+        * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+        * hash fault. See the comment in hash_preload().
+        */
        ld      r11, PACA_THREAD_INFO(r13)
-       lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
-       andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
-       bne     77f                     /* then don't call hash_page now */
+       lwz     r0,TI_PREEMPT(r11)
+       andis.  r0,r0,NMI_MASK@h
+       bne     77f
 
        /*
         * r3 contains the trap number
index 468169e..9b9f92a 100644 (file)
@@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
        pgd_t *pgdir;
        int rc, ssize, update_flags = 0;
        unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
+       unsigned long flags;
 
        BUG_ON(get_region_id(ea) != USER_REGION_ID);
 
@@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
                return;
 #endif /* CONFIG_PPC_64K_PAGES */
 
+       /*
+        * __hash_page_* must run with interrupts off, as it sets the
+        * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
+        * time and may take a hash fault reading the user stack, see
+        * read_user_stack_slow() in the powerpc/perf code.
+        *
+        * If that takes a hash fault on the same page as we lock here, it
+        * will bail out when seeing H_PAGE_BUSY set, and retry the access
+        * leading to an infinite loop.
+        *
+        * Disabling interrupts here does not prevent perf interrupts, but it
+        * will prevent them taking hash faults (see the NMI test in
+        * do_hash_page), then read_user_stack's copy_from_user_nofault will
+        * fail and perf will fall back to read_user_stack_slow(), which
+        * walks the Linux page tables.
+        *
+        * Interrupts must also be off for the duration of the
+        * mm_is_thread_local test and update, to prevent preempt running the
+        * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+        */
+       local_irq_save(flags);
+
        /* Is that local to this CPU ? */
        if (mm_is_thread_local(mm))
                update_flags |= HPTE_LOCAL_UPDATE;
@@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
                                   mm_ctx_user_psize(&mm->context),
                                   mm_ctx_user_psize(&mm->context),
                                   pte_val(*ptep));
+
+       local_irq_restore(flags);
 }
 
 /*
index cd6a742..01d7028 100644 (file)
@@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 
        perf_read_regs(regs);
 
+       /*
+        * If perf interrupts hit in a local_irq_disable (soft-masked) region,
+        * we consider them as NMIs. This is required to prevent hash faults on
+        * user addresses when reading callchains. See the NMI test in
+        * do_hash_page.
+        */
        nmi = perf_intr_is_nmi(regs);
        if (nmi)
                nmi_enter();