powerpc/32: replace LOAD_MSR_KERNEL() by LOAD_REG_IMMEDIATE()
author: Christophe Leroy <christophe.leroy@c-s.fr>
Tue, 20 Aug 2019 14:34:13 +0000 (14:34 +0000)
committer: Michael Ellerman <mpe@ellerman.id.au>
Tue, 27 Aug 2019 03:03:36 +0000 (13:03 +1000)
LOAD_MSR_KERNEL() and LOAD_REG_IMMEDIATE() do the same thing
in the same way, so drop LOAD_MSR_KERNEL().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8f04a6df0bc8949517fd8236d50c15008ccf9231.1566311636.git.christophe.leroy@c-s.fr
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/head_32.h

index 54fab22..972b055 100644 (file)
@@ -230,7 +230,7 @@ transfer_to_handler_cont:
         */
        lis     r12,reenable_mmu@h
        ori     r12,r12,reenable_mmu@l
-       LOAD_MSR_KERNEL(r0, MSR_KERNEL)
+       LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r0
        SYNC
@@ -304,7 +304,7 @@ stack_ovf:
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        lis     r9,StackOverflow@ha
        addi    r9,r9,StackOverflow@l
-       LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+       LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
        mtspr   SPRN_NRI, r0
 #endif
@@ -324,7 +324,7 @@ trace_syscall_entry_irq_off:
        bl      trace_hardirqs_on
 
        /* Now enable for real */
-       LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
+       LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
        mtmsr   r10
 
        REST_GPR(0, r1)
@@ -394,7 +394,7 @@ ret_from_syscall:
 #endif
        mr      r6,r3
        /* disable interrupts so current_thread_info()->flags can't change */
-       LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+       LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)      /* doesn't include MSR_EE */
        /* Note: We don't bother telling lockdep about it */
        SYNC
        MTMSRD(r10)
@@ -824,7 +824,7 @@ ret_from_except:
         * can't change between when we test it and when we return
         * from the interrupt. */
        /* Note: We don't bother telling lockdep about it */
-       LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+       LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
        SYNC                    /* Some chip revs have problems here... */
        MTMSRD(r10)             /* disable interrupts */
 
@@ -991,7 +991,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
         * can restart the exception exit path at the label
         * exc_exit_restart below.  -- paulus
         */
-       LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
+       LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
        SYNC
        MTMSRD(r10)             /* clear the RI bit */
        .globl exc_exit_restart
@@ -1066,7 +1066,7 @@ exc_exit_restart_end:
        REST_NVGPRS(r1);                                                \
        lwz     r3,_MSR(r1);                                            \
        andi.   r3,r3,MSR_PR;                                           \
-       LOAD_MSR_KERNEL(r10,MSR_KERNEL);                                \
+       LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);                             \
        bne     user_exc_return;                                        \
        lwz     r0,GPR0(r1);                                            \
        lwz     r2,GPR2(r1);                                            \
@@ -1236,7 +1236,7 @@ recheck:
         * neither. Those disable/enable cycles used to peek at
         * TI_FLAGS aren't advertised.
         */
-       LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+       LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
        SYNC
        MTMSRD(r10)             /* disable interrupts */
        lwz     r9,TI_FLAGS(r2)
@@ -1329,7 +1329,7 @@ _GLOBAL(enter_rtas)
        lwz     r4,RTASBASE(r4)
        mfmsr   r9
        stw     r9,8(r1)
-       LOAD_MSR_KERNEL(r0,MSR_KERNEL)
+       LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
        SYNC                    /* disable interrupts so SRR0/1 */
        MTMSRD(r0)              /* don't get trashed */
        li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
index 4a69255..8abc778 100644 (file)
@@ -4,19 +4,6 @@
 
 #include <asm/ptrace.h>        /* for STACK_FRAME_REGS_MARKER */
 
-/*
- * MSR_KERNEL is > 0x8000 on 4xx/Book-E since it include MSR_CE.
- */
-.macro __LOAD_MSR_KERNEL r, x
-.if \x >= 0x8000
-       lis \r, (\x)@h
-       ori \r, \r, (\x)@l
-.else
-       li \r, (\x)
-.endif
-.endm
-#define LOAD_MSR_KERNEL(r, x) __LOAD_MSR_KERNEL r, x
-
 /*
  * Exception entry code.  This code runs with address translation
  * turned off, i.e. using physical addresses.
@@ -92,7 +79,7 @@
 #ifdef CONFIG_40x
        rlwinm  r9,r9,0,14,12           /* clear MSR_WE (necessary?) */
 #else
-       LOAD_MSR_KERNEL(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
+       LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
        MTMSRD(r10)                     /* (except for mach check in rtas) */
 #endif
        lis     r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
         * otherwise we might risk taking an interrupt before we tell lockdep
         * they are enabled.
         */
-       LOAD_MSR_KERNEL(r10, MSR_KERNEL)
+       LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
        rlwimi  r10, r9, 0, MSR_EE
 #else
-       LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
+       LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
 #endif
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
        mtspr   SPRN_NRI, r0
@@ -187,7 +174,7 @@ label:
 #define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)          \
        li      r10,trap;                                       \
        stw     r10,_TRAP(r11);                                 \
-       LOAD_MSR_KERNEL(r10, msr);                              \
+       LOAD_REG_IMMEDIATE(r10, msr);                           \
        bl      tfer;                                           \
        .long   hdlr;                                           \
        .long   ret