powerpc/64s: power4 nap fixup in C
author     Nicholas Piggin <npiggin@gmail.com>     Tue, 6 Apr 2021 02:55:08 +0000 (12:55 +1000)
committer  Michael Ellerman <mpe@ellerman.id.au>   Thu, 8 Apr 2021 11:17:45 +0000 (21:17 +1000)
There is no need for this to be in asm; use the new interrupt entry wrapper.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Andreas Schwab <schwab@linux-m68k.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210406025508.821718-1-npiggin@gmail.com
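
For context, the mechanism being moved is small: power4_idle sets _TLF_NAPPING and naps in a loop, and the interrupt exit path notices the flag and rewrites the saved NIP so the interrupt returns to power4_idle_nap_return instead of back into the nap loop. A rough user-space sketch of that flag-and-redirect step, using simplified stand-ins for the kernel's thread_info, pt_regs and current_thread_info() (none of this is kernel code, just an illustration of the patch below), looks like:

#include <stdio.h>

#define _TLF_NAPPING	0x1	/* simplified stand-in for the kernel flag */

/* Minimal stand-ins for the kernel structures involved. */
struct thread_info { unsigned int local_flags; };
struct pt_regs { unsigned long nip; };

static struct thread_info ti;	/* plays the role of current_thread_info() */

static void power4_idle_nap_return(void)
{
	/* In the kernel this is just a blr back to power4_idle_nap's caller. */
}

/*
 * Mirrors the new nap_adjust_return() in interrupt.h: if the interrupted
 * context was napping, clear the flag and point the saved NIP at the
 * nap-return stub so the interrupt exits out of the nap loop.
 */
static void nap_adjust_return(struct pt_regs *regs)
{
	if (ti.local_flags & _TLF_NAPPING) {
		ti.local_flags &= ~_TLF_NAPPING;
		regs->nip = (unsigned long)power4_idle_nap_return;
	}
}

int main(void)
{
	struct pt_regs regs = { .nip = 0x1234 };	/* "inside" the nap loop */

	ti.local_flags |= _TLF_NAPPING;		/* idle code about to nap */
	nap_adjust_return(&regs);		/* async interrupt exit wrapper */

	if (regs.nip == (unsigned long)power4_idle_nap_return)
		printf("interrupt return redirected out of the nap loop\n");
	return 0;
}

The NMI path deliberately skips this adjustment, as the comment added to interrupt_nmi_exit_prepare() in the patch explains.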
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/idle_book3s.S

diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 7c63389..05e7fc4 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -9,6 +9,17 @@
 #include <asm/kprobes.h>
 #include <asm/runlatch.h>
 
+static inline void nap_adjust_return(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_970_NAP
+       if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
+               /* Can avoid a test-and-clear because NMIs do not call this */
+               clear_thread_local_flags(_TLF_NAPPING);
+               regs->nip = (unsigned long)power4_idle_nap_return;
+       }
+#endif
+}
+
 struct interrupt_state {
 #ifdef CONFIG_PPC_BOOK3E_64
        enum ctx_state ctx_state;
@@ -124,6 +135,14 @@ static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct in
 
 static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
 {
+       /*
+        * Adjust at exit so the main handler sees the true NIA. This must
+        * come before irq_exit() because irq_exit can enable interrupts, and
+        * if another interrupt is taken before nap_adjust_return has run
+        * here, then that interrupt would return directly to idle nap return.
+        */
+       nap_adjust_return(regs);
+
        irq_exit();
        interrupt_exit_prepare(regs, state);
 }
@@ -179,6 +198,11 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
                        radix_enabled() || (mfmsr() & MSR_DR))
                nmi_exit();
 
+       /*
+        * nmi does not call nap_adjust_return because nmi should not create
+        * new work to do (must use irq_work for that).
+        */
+
 #ifdef CONFIG_PPC64
        if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
                this_cpu_set_ftrace_enabled(state->ftrace_enabled);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index ad36e85..7bf8a15 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -389,6 +389,7 @@ extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
 extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
 #ifdef CONFIG_PPC_970_NAP
 extern void power4_idle_nap(void);
+void power4_idle_nap_return(void);
 #endif
 
 extern unsigned long cpuidle_disable;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 9d64024..b4ec6c7 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -151,6 +151,12 @@ void arch_setup_new_exec(void);
 
 #ifndef __ASSEMBLY__
 
+static inline void clear_thread_local_flags(unsigned int flags)
+{
+       struct thread_info *ti = current_thread_info();
+       ti->local_flags &= ~flags;
+}
+
 static inline bool test_thread_local_flags(unsigned int flags)
 {
        struct thread_info *ti = current_thread_info();
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 8082b69..0cdb59e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -692,25 +692,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        ld      r1,GPR1(r1)
 .endm
 
-/*
- * When the idle code in power4_idle puts the CPU into NAP mode,
- * it has to do so in a loop, and relies on the external interrupt
- * and decrementer interrupt entry code to get it out of the loop.
- * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
- * to signal that it is in the loop and needs help to get out.
- */
-#ifdef CONFIG_PPC_970_NAP
-#define FINISH_NAP                             \
-BEGIN_FTR_SECTION                              \
-       ld      r11, PACA_THREAD_INFO(r13);     \
-       ld      r9,TI_LOCAL_FLAGS(r11);         \
-       andi.   r10,r9,_TLF_NAPPING;            \
-       bnel    power4_fixup_nap;               \
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-#else
-#define FINISH_NAP
-#endif
-
 /*
  * There are a few constraints to be concerned with.
  * - Real mode exceptions code/data must be located at their physical location.
@@ -1248,7 +1229,6 @@ EXC_COMMON_BEGIN(machine_check_common)
         */
        GEN_COMMON machine_check
 
-       FINISH_NAP
        /* Enable MSR_RI when finished with PACA_EXMC */
        li      r10,MSR_RI
        mtmsrd  r10,1
@@ -1571,7 +1551,6 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
 EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
 EXC_COMMON_BEGIN(hardware_interrupt_common)
        GEN_COMMON hardware_interrupt
-       FINISH_NAP
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_IRQ
        b       interrupt_return
@@ -1801,7 +1780,6 @@ EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
 EXC_VIRT_END(decrementer, 0x4900, 0x80)
 EXC_COMMON_BEGIN(decrementer_common)
        GEN_COMMON decrementer
-       FINISH_NAP
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      timer_interrupt
        b       interrupt_return
@@ -1886,7 +1864,6 @@ EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
 EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
 EXC_COMMON_BEGIN(doorbell_super_common)
        GEN_COMMON doorbell_super
-       FINISH_NAP
        addi    r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_PPC_DOORBELL
        bl      doorbell_exception
@@ -2237,7 +2214,6 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
 
 EXC_COMMON_BEGIN(hmi_exception_common)
        GEN_COMMON hmi_exception
-       FINISH_NAP
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      handle_hmi_exception
        b       interrupt_return
@@ -2266,7 +2242,6 @@ EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
 EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
 EXC_COMMON_BEGIN(h_doorbell_common)
        GEN_COMMON h_doorbell
-       FINISH_NAP
        addi    r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_PPC_DOORBELL
        bl      doorbell_exception
@@ -2299,7 +2274,6 @@ EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
 EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
 EXC_COMMON_BEGIN(h_virt_irq_common)
        GEN_COMMON h_virt_irq
-       FINISH_NAP
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_IRQ
        b       interrupt_return
@@ -2345,7 +2319,6 @@ EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
 EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
 EXC_COMMON_BEGIN(performance_monitor_common)
        GEN_COMMON performance_monitor
-       FINISH_NAP
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      performance_monitor_exception
        b       interrupt_return
@@ -3096,24 +3069,6 @@ USE_FIXED_SECTION(virt_trampolines)
 __end_interrupts:
 DEFINE_FIXED_SYMBOL(__end_interrupts)
 
-#ifdef CONFIG_PPC_970_NAP
-       /*
-        * Called by exception entry code if _TLF_NAPPING was set, this clears
-        * the NAPPING flag, and redirects the exception exit to
-        * power4_fixup_nap_return.
-        */
-       .globl power4_fixup_nap
-EXC_COMMON_BEGIN(power4_fixup_nap)
-       andc    r9,r9,r10
-       std     r9,TI_LOCAL_FLAGS(r11)
-       LOAD_REG_ADDR(r10, power4_idle_nap_return)
-       std     r10,_NIP(r1)
-       blr
-
-power4_idle_nap_return:
-       blr
-#endif
-
 CLOSE_FIXED_SECTION(real_vectors);
 CLOSE_FIXED_SECTION(real_trampolines);
 CLOSE_FIXED_SECTION(virt_vectors);
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index f9e6d83..abb719b 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -209,4 +209,8 @@ _GLOBAL(power4_idle_nap)
        mtmsrd  r7
        isync
        b       1b
+
+       .globl power4_idle_nap_return
+power4_idle_nap_return:
+       blr
 #endif