Merge branch 'fixes' into next
author Michael Ellerman <mpe@ellerman.id.au>
Tue, 26 May 2020 12:56:03 +0000 (22:56 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Tue, 26 May 2020 12:56:03 +0000 (22:56 +1000)
Merge our fixes branch from this cycle. It contains several important
fixes we need in next for testing purposes, and also some that will
conflict with upcoming changes.

16 files changed:
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/32/hash.h
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/head_40x.S
arch/powerpc/kernel/ima_arch.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/syscall_64.c
arch/powerpc/kernel/vdso32/gettimeofday.S
arch/powerpc/mm/book3s32/hash_low.S
arch/powerpc/platforms/Kconfig.cputype
lib/mpi/longlong.h

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 752dedd..1e69cc2 100644
@@ -130,7 +130,7 @@ config PPC
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_MEMBARRIER_CALLBACKS
        select ARCH_HAS_SCALED_CPUTIME          if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
-       select ARCH_HAS_STRICT_KERNEL_RWX       if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+       select ARCH_HAS_STRICT_KERNEL_RWX       if (PPC32 && !HIBERNATION)
        select ARCH_HAS_TICK_BROADCAST          if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAS_UACCESS_FLUSHCACHE
        select ARCH_HAS_UACCESS_MCSAFE          if PPC64
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
index 34a7215..2a0a467 100644
@@ -17,9 +17,9 @@
  * updating the accessed and modified bits in the page table tree.
  */
 
-#define _PAGE_USER     0x001   /* usermode access allowed */
-#define _PAGE_RW       0x002   /* software: user write access allowed */
-#define _PAGE_PRESENT  0x004   /* software: pte contains a translation */
+#define _PAGE_PRESENT  0x001   /* software: pte contains a translation */
+#define _PAGE_HASHPTE  0x002   /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER     0x004   /* usermode access allowed */
 #define _PAGE_GUARDED  0x008   /* G: prohibit speculative access */
 #define _PAGE_COHERENT 0x010   /* M: enforce memory coherence (SMP systems) */
 #define _PAGE_NO_CACHE 0x020   /* I: cache inhibit */
@@ -27,7 +27,7 @@
 #define _PAGE_DIRTY    0x080   /* C: page changed */
 #define _PAGE_ACCESSED 0x100   /* R: page referenced */
 #define _PAGE_EXEC     0x200   /* software: exec allowed */
-#define _PAGE_HASHPTE  0x400   /* hash_page has made an HPTE for this pte */
+#define _PAGE_RW       0x400   /* software: user write access allowed */
 #define _PAGE_SPECIAL  0x800   /* software: Special page */
 
 #ifdef CONFIG_PTE_64BIT
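A note on the two hash.h hunks above: this comes in from the fixes branch and, as far as the hunks show, undoes the recent book3s/32 PTE bit reordering, restoring _PAGE_RW to 0x400 and putting _PAGE_HASHPTE and _PAGE_USER back in their historical slots; the assembly hunks further down carry the matching rotate-amount updates. As a quick sanity sketch (not part of the patch, plain C11, values copied from the restored layout above), the bits stay pairwise disjoint:

    /* pte-bits-check.c: restored book3s/32 PTE software/hardware bits */
    #define _PAGE_PRESENT  0x001
    #define _PAGE_HASHPTE  0x002
    #define _PAGE_USER     0x004
    #define _PAGE_GUARDED  0x008
    #define _PAGE_COHERENT 0x010
    #define _PAGE_NO_CACHE 0x020
    #define _PAGE_DIRTY    0x080
    #define _PAGE_ACCESSED 0x100
    #define _PAGE_EXEC     0x200
    #define _PAGE_RW       0x400
    #define _PAGE_SPECIAL  0x800

    /* the sum equals the bitwise OR only if no two values share a bit */
    _Static_assert((_PAGE_PRESENT + _PAGE_HASHPTE + _PAGE_USER + _PAGE_GUARDED +
                    _PAGE_COHERENT + _PAGE_NO_CACHE + _PAGE_DIRTY +
                    _PAGE_ACCESSED + _PAGE_EXEC + _PAGE_RW + _PAGE_SPECIAL) ==
                   (_PAGE_PRESENT | _PAGE_HASHPTE | _PAGE_USER | _PAGE_GUARDED |
                    _PAGE_COHERENT | _PAGE_NO_CACHE | _PAGE_DIRTY |
                    _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_RW | _PAGE_SPECIAL),
                   "restored PTE bits overlap");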
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 1617e73..5a267b7 100644
@@ -75,7 +75,7 @@
 
 .macro kuap_check      current, gpr
 #ifdef CONFIG_PPC_KUAP_DEBUG
-       lwz     \gpr2, KUAP(thread)
+       lwz     \gpr, KUAP(thread)
 999:   twnei   \gpr, 0
        EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
 #endif
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e0e7177..3a0db7b 100644
@@ -250,9 +250,27 @@ static inline bool arch_irqs_disabled(void)
        }                                                               \
 } while(0)
 
+static inline bool __lazy_irq_pending(u8 irq_happened)
+{
+       return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
+/*
+ * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
+ */
 static inline bool lazy_irq_pending(void)
 {
-       return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+       return __lazy_irq_pending(get_paca()->irq_happened);
+}
+
+/*
+ * Check if a lazy IRQ is pending, with no debugging checks.
+ * Should be called with IRQs hard disabled.
+ * For use in RI disabled code or other constrained situations.
+ */
+static inline bool lazy_irq_pending_nocheck(void)
+{
+       return __lazy_irq_pending(local_paca->irq_happened);
 }
 
 /*
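The new lazy_irq_pending_nocheck() matters because the exit paths in syscall_64.c (see those hunks below) call it after __hard_EE_RI_disable(), i.e. with MSR[RI] already clear; under debug configs get_paca() can expand to a debug_smp_processor_id() check, which is not safe at that point, so the nocheck variant reads irq_happened through local_paca directly. A condensed sketch of the call pattern, trimmed from the syscall_64.c hunks (kernel context assumed, not compilable stand-alone):

    /* return-to-user path: exit work done, interrupts still soft-enabled */
    __hard_EE_RI_disable();               /* MSR[EE] = MSR[RI] = 0 from here */
    if (unlikely(lazy_irq_pending_nocheck())) {  /* no debug hooks with RI=0 */
            __hard_RI_enable();           /* machine checks recoverable again */
            trace_hardirqs_off();
            local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
            local_irq_enable();           /* replay the pending interrupt */
            /* ... then retry the exit sequence ... */
    }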
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index a6371fb..8420abd 100644
@@ -732,7 +732,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
        stw     r10,_CCR(r1)
        stw     r1,KSP(r3)      /* Set old stack pointer */
 
-       kuap_check r2, r4
+       kuap_check r2, r0
 #ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9a1e5d6..b3c9f15 100644
@@ -472,15 +472,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 #ifdef CONFIG_PPC_BOOK3S
        /*
         * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
-        * touched, AMR not set, no exit work created, then this can be used.
+        * touched, no exit work created, then this can be used.
         */
        .balign IFETCH_ALIGN_BYTES
        .globl fast_interrupt_return
 fast_interrupt_return:
 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
+       kuap_check_amr r3, r4
        ld      r4,_MSR(r1)
        andi.   r0,r4,MSR_PR
        bne     .Lfast_user_interrupt_return
+       kuap_restore_amr r3
        andi.   r0,r4,MSR_RI
        li      r3,0 /* 0 return value, no EMULATE_STACK_STORE */
        bne+    .Lfast_kernel_interrupt_return
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0727728..7203608 100644
@@ -971,6 +971,7 @@ EXC_COMMON_BEGIN(system_reset_common)
        ld      r10,SOFTE(r1)
        stb     r10,PACAIRQSOFTMASK(r13)
 
+       kuap_restore_amr r10
        EXCEPTION_RESTORE_REGS
        RFI_TO_USER_OR_KERNEL
 
@@ -2435,6 +2436,7 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
        GEN_COMMON facility_unavailable
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      facility_unavailable_exception
+       REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b       interrupt_return
 
        GEN_KVM facility_unavailable
@@ -2464,6 +2466,7 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
        GEN_COMMON h_facility_unavailable
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      facility_unavailable_exception
+       REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
        b       interrupt_return
 
        GEN_KVM h_facility_unavailable
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index daaa153..97c8879 100644
@@ -348,7 +348,7 @@ BEGIN_MMU_FTR_SECTION
        andis.  r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
 #endif
        bne     handle_page_fault_tramp_2       /* if not, try to put a PTE */
-       rlwinm  r3, r5, 32 - 24, 30, 30         /* DSISR_STORE -> _PAGE_RW */
+       rlwinm  r3, r5, 32 - 15, 21, 21         /* DSISR_STORE -> _PAGE_RW */
        bl      hash_page
        b       handle_page_fault_tramp_1
 FTR_SECTION_ELSE
@@ -497,6 +497,7 @@ InstructionTLBMiss:
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    InstructionAddressInvalid /* return if access not permitted */
        /* Convert linux-style PTE to low word of PPC-style PTE */
+       rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1, r1, 0xe06           /* clear out reserved bits */
        andc    r1, r0, r1              /* PP = user? 1 : 0 */
 BEGIN_FTR_SECTION
@@ -564,8 +565,9 @@ DataLoadTLBMiss:
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        /* Convert linux-style PTE to low word of PPC-style PTE */
-       rlwinm  r1,r0,0,30,30           /* _PAGE_RW -> PP msb */
-       rlwimi  r0,r0,1,30,30           /* _PAGE_USER -> PP msb */
+       rlwinm  r1,r0,32-9,30,30        /* _PAGE_RW -> PP msb */
+       rlwimi  r0,r0,32-1,30,30        /* _PAGE_USER -> PP msb */
+       rlwimi  r0,r0,32-1,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1,r1,0xe04             /* clear out reserved bits */
        andc    r1,r0,r1                /* PP = user? rw? 1: 3: 0 */
 BEGIN_FTR_SECTION
@@ -643,6 +645,7 @@ DataStoreTLBMiss:
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        /* Convert linux-style PTE to low word of PPC-style PTE */
+       rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
        li      r1,0xe06                /* clear out reserved bits & PP msb */
        andc    r1,r0,r1                /* PP = user? 1: 0 */
 BEGIN_FTR_SECTION
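All the new shift amounts in this file follow one rule: rlwinm/rlwimi with a shift of 32 - n is a rotate right by n, so each constant is the distance from a bit's new _PAGE_* position to its slot in the hardware PP field (IBM bit numbering: bit b has value 1 << (31 - b), so mask 30,30 selects 0x2 and 31,31 selects 0x1). A small host-side C sketch checking the arithmetic; DSISR_ISSTORE = 0x02000000 is an assumption taken from the kernel headers, the rest is copied from the hunks:

    #include <stdint.h>
    #include <stdio.h>

    /* the rotation rlwinm/rlwimi apply to the source before masking */
    static uint32_t rotl32(uint32_t x, unsigned int n)
    {
            n &= 31;
            return (x << n) | (x >> ((32 - n) & 31));
    }

    int main(void)
    {
            uint32_t page_rw     = 0x400;       /* restored _PAGE_RW */
            uint32_t page_user   = 0x004;       /* restored _PAGE_USER */
            uint32_t dsisr_store = 0x02000000;  /* assumed DSISR_ISSTORE */

            /* rlwinm rX,rY,32-9,30,30: _PAGE_RW -> PP msb (0x2) */
            printf("RW    -> PP msb  : %s\n",
                   rotl32(page_rw, 32 - 9) == 0x2 ? "ok" : "BAD");
            /* rlwimi rX,rY,32-2,31,31: _PAGE_USER -> PP lsb (0x1) */
            printf("USER  -> PP lsb  : %s\n",
                   rotl32(page_user, 32 - 2) == 0x1 ? "ok" : "BAD");
            /* rlwinm r3,r5,32-15,21,21: DSISR store bit -> _PAGE_RW */
            printf("store -> _PAGE_RW: %s\n",
                   rotl32(dsisr_store, 32 - 15) == page_rw ? "ok" : "BAD");
            return 0;
    }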
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 9bb6639..2cec543 100644
@@ -344,8 +344,9 @@ _ENTRY(saved_ksp_limit)
 /* 0x0C00 - System Call Exception */
        START_EXCEPTION(0x0C00, SystemCall)
        SYSCALL_ENTRY   0xc00
+/*     Trap_0D is commented out to get more space for system call exception */
 
-       EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
+/*     EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) */
        EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
 
diff --git a/arch/powerpc/kernel/ima_arch.c b/arch/powerpc/kernel/ima_arch.c
index e341162..957abd5 100644
@@ -19,12 +19,12 @@ bool arch_ima_get_secureboot(void)
  * to be stored as an xattr or as an appended signature.
  *
  * To avoid duplicate signature verification as much as possible, the IMA
- * policy rule for module appraisal is added only if CONFIG_MODULE_SIG_FORCE
+ * policy rule for module appraisal is added only if CONFIG_MODULE_SIG
  * is not enabled.
  */
 static const char *const secure_rules[] = {
        "appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
        "appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
 #endif
        NULL
@@ -50,7 +50,7 @@ static const char *const secure_and_trusted_rules[] = {
        "measure func=KEXEC_KERNEL_CHECK template=ima-modsig",
        "measure func=MODULE_CHECK template=ima-modsig",
        "appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
        "appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
 #endif
        NULL
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index defe05b..bb47555 100644
@@ -534,6 +534,8 @@ static bool __init parse_cache_info(struct device_node *np,
        lsizep = of_get_property(np, propnames[3], NULL);
        if (bsizep == NULL)
                bsizep = lsizep;
+       if (lsizep == NULL)
+               lsizep = bsizep;
        if (lsizep != NULL)
                lsize = be32_to_cpu(*lsizep);
        if (bsizep != NULL)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index c74295a..7b7c89c 100644
@@ -35,6 +35,8 @@ notrace long system_call_exception(long r3, long r4, long r5,
        BUG_ON(!FULL_REGS(regs));
        BUG_ON(regs->softe != IRQS_ENABLED);
 
+       kuap_check_amr();
+
        account_cpu_user_entry();
 
 #ifdef CONFIG_PPC_SPLPAR
@@ -47,8 +49,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
        }
 #endif
 
-       kuap_check_amr();
-
        /*
         * This is not required for the syscall exit path, but makes the
         * stack frame look nicer. If this was initialised in the first stack
@@ -117,6 +117,8 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
        unsigned long ti_flags;
        unsigned long ret = 0;
 
+       kuap_check_amr();
+
        regs->result = r3;
 
        /* Check whether the syscall is issued inside a restartable sequence */
@@ -189,7 +191,7 @@ again:
 
        /* This pattern matches prep_irq_for_idle */
        __hard_EE_RI_disable();
-       if (unlikely(lazy_irq_pending())) {
+       if (unlikely(lazy_irq_pending_nocheck())) {
                __hard_RI_enable();
                trace_hardirqs_off();
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -204,8 +206,6 @@ again:
        local_paca->tm_scratch = regs->msr;
 #endif
 
-       kuap_check_amr();
-
        account_cpu_user_exit();
 
        return ret;
@@ -228,6 +228,8 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
        BUG_ON(!FULL_REGS(regs));
        BUG_ON(regs->softe != IRQS_ENABLED);
 
+       kuap_check_amr();
+
        local_irq_save(flags);
 
 again:
@@ -264,7 +266,7 @@ again:
 
        trace_hardirqs_on();
        __hard_EE_RI_disable();
-       if (unlikely(lazy_irq_pending())) {
+       if (unlikely(lazy_irq_pending_nocheck())) {
                __hard_RI_enable();
                trace_hardirqs_off();
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -292,8 +294,6 @@ again:
        local_paca->tm_scratch = regs->msr;
 #endif
 
-       kuap_check_amr();
-
        account_cpu_user_exit();
 
        return ret;
@@ -313,6 +313,8 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
        BUG_ON(regs->msr & MSR_PR);
        BUG_ON(!FULL_REGS(regs));
 
+       kuap_check_amr();
+
        if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
                clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
                ret = 1;
@@ -334,7 +336,7 @@ again:
 
                trace_hardirqs_on();
                __hard_EE_RI_disable();
-               if (unlikely(lazy_irq_pending())) {
+               if (unlikely(lazy_irq_pending_nocheck())) {
                        __hard_RI_enable();
                        irq_soft_mask_set(IRQS_ALL_DISABLED);
                        trace_hardirqs_off();
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index a395156..e7f8f9f 100644
@@ -218,11 +218,11 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
        blr
 
        /*
-        * invalid clock
+        * syscall fallback
         */
 99:
-       li      r3, EINVAL
-       crset   so
+       li      r0,__NR_clock_getres
+       sc
        blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_clock_getres)
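The user-visible effect of the hunk above: a clock id that __kernel_clock_getres does not handle itself no longer gets a hard EINVAL from the vDSO stub; the fallback now issues the real clock_getres syscall (li r0,__NR_clock_getres; sc), so any clock the kernel supports keeps working. A userspace sketch, assuming the libc routes clock_getres through the vDSO and that CLOCK_BOOTTIME is one of the ids the stub leaves to the kernel:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec res;

            /* with the old stub this failed with EINVAL on 32-bit powerpc;
             * with the syscall fallback it reports the kernel's resolution */
            if (clock_getres(CLOCK_BOOTTIME, &res) == 0)
                    printf("resolution: %ld ns\n", (long)res.tv_nsec);
            else
                    perror("clock_getres");
            return 0;
    }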
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 6d23608..877d880 100644
@@ -35,7 +35,7 @@ mmu_hash_lock:
 /*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x002) if a write.
+ * _PAGE_RW (0x400) if a write.
  * r9 contains the SRR1 value, from which we use the MSR_PR bit.
  * SPRG_THREAD contains the physical address of the current task's thread.
  *
@@ -69,7 +69,7 @@ _GLOBAL(hash_page)
        blt+    112f                    /* assume user more likely */
        lis     r5, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
        addi    r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
-       rlwimi  r3,r9,32-14,31,31       /* MSR_PR -> _PAGE_USER */
+       rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
 112:
 #ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
@@ -94,7 +94,7 @@ _GLOBAL(hash_page)
 #else
        rlwimi  r8,r4,23,20,28          /* compute pte address */
 #endif
-       rlwinm  r0,r3,6,24,24           /* _PAGE_RW access -> _PAGE_DIRTY */
+       rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
        ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
        /*
@@ -310,9 +310,11 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 
 _GLOBAL(create_hpte)
        /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
+       rlwinm  r8,r5,32-9,30,30        /* _PAGE_RW -> PP msb */
        rlwinm  r0,r5,32-6,30,30        /* _PAGE_DIRTY -> PP msb */
-       and     r8,r5,r0                /* writable if _RW & _DIRTY */
-       rlwimi  r5,r5,1,30,30           /* _PAGE_USER -> PP msb */
+       and     r8,r8,r0                /* writable if _RW & _DIRTY */
+       rlwimi  r5,r5,32-1,30,30        /* _PAGE_USER -> PP msb */
+       rlwimi  r5,r5,32-2,31,31        /* _PAGE_USER -> PP lsb */
        ori     r8,r8,0xe04             /* clear out reserved bits */
        andc    r8,r5,r8                /* PP = user? (rw&dirty? 1: 3): 0 */
 BEGIN_FTR_SECTION
@@ -564,7 +566,7 @@ _GLOBAL(flush_hash_pages)
 33:    lwarx   r8,0,r5                 /* fetch the pte flags word */
        andi.   r0,r8,_PAGE_HASHPTE
        beq     8f                      /* done if HASHPTE is already clear */
-       rlwinm  r8,r8,0,~_PAGE_HASHPTE  /* clear HASHPTE bit */
+       rlwinm  r8,r8,0,31,29           /* clear HASHPTE bit */
        stwcx.  r8,0,r5                 /* update the pte */
        bne-    33b
 
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 404f269..e3fb0ef 100644
@@ -397,7 +397,7 @@ config PPC_KUAP
 
 config PPC_KUAP_DEBUG
        bool "Extra debugging for Kernel Userspace Access Protection"
-       depends on PPC_KUAP && (PPC_RADIX_MMU || PPC_32)
+       depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
        help
          Add extra debugging for Kernel Userspace Access Protection (KUAP)
          If you're unsure, say N.
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 2dceaca..891e1c3 100644
@@ -722,22 +722,22 @@ do {                                                                      \
 do { \
        if (__builtin_constant_p(bh) && (bh) == 0) \
                __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
-               : "=r" ((USItype)(sh)), \
-               "=&r" ((USItype)(sl)) \
+               : "=r" (sh), \
+               "=&r" (sl) \
                : "%r" ((USItype)(ah)), \
                "%r" ((USItype)(al)), \
                "rI" ((USItype)(bl))); \
        else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
                __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
-               : "=r" ((USItype)(sh)), \
-               "=&r" ((USItype)(sl)) \
+               : "=r" (sh), \
+               "=&r" (sl) \
                : "%r" ((USItype)(ah)), \
                "%r" ((USItype)(al)), \
                "rI" ((USItype)(bl))); \
        else \
                __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
-               : "=r" ((USItype)(sh)), \
-               "=&r" ((USItype)(sl)) \
+               : "=r" (sh), \
+               "=&r" (sl) \
                : "%r" ((USItype)(ah)), \
                "r" ((USItype)(bh)), \
                "%r" ((USItype)(al)), \
@@ -747,36 +747,36 @@ do { \
 do { \
        if (__builtin_constant_p(ah) && (ah) == 0) \
                __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
-               : "=r" ((USItype)(sh)), \
-               "=&r" ((USItype)(sl)) \
+               : "=r" (sh), \
+               "=&r" (sl) \
                : "r" ((USItype)(bh)), \
                "rI" ((USItype)(al)), \
                "r" ((USItype)(bl))); \
        else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \
                __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
-               : "=r" ((USItype)(sh)), \
-               "=&r" ((USItype)(sl)) \
+               : "=r" (sh), \
+               "=&r" (sl) \
                : "r" ((USItype)(bh)), \
                "rI" ((USItype)(al)), \
                "r" ((USItype)(bl))); \
        else if (__builtin_constant_p(bh) && (bh) == 0) \
                __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
-               : "=r" ((USItype)(sh)), \
-               "=&r" ((USItype)(sl)) \
+               : "=r" (sh), \
+               "=&r" (sl) \
                : "r" ((USItype)(ah)), \
                "rI" ((USItype)(al)), \
                "r" ((USItype)(bl))); \
        else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
                __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
-               : "=r" ((USItype)(sh)), \
-               "=&r" ((USItype)(sl)) \
+               : "=r" (sh), \
+               "=&r" (sl) \
                : "r" ((USItype)(ah)), \
                "rI" ((USItype)(al)), \
                "r" ((USItype)(bl))); \
        else \
                __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
-               : "=r" ((USItype)(sh)), \
-               "=&r" ((USItype)(sl)) \
+               : "=r" (sh), \
+               "=&r" (sl) \
                : "r" ((USItype)(ah)), \
                "r" ((USItype)(bh)), \
                "rI" ((USItype)(al)), \
@@ -787,7 +787,7 @@ do { \
 do { \
        USItype __m0 = (m0), __m1 = (m1); \
        __asm__ ("mulhwu %0,%1,%2" \
-       : "=r" ((USItype) ph) \
+       : "=r" (ph) \
        : "%r" (__m0), \
        "r" (__m1)); \
        (pl) = __m0 * __m1; \
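The pattern removed throughout this file is a cast on an asm output operand, as in "=r" ((USItype)(sh)). GCC has historically tolerated that, but clang rejects it ("invalid use of a cast in a inline asm context requiring an lvalue"), which is presumably why only the outputs lose their casts; the input operands keep (USItype), since inputs are rvalues. A minimal sketch of the rule (PowerPC inline asm to match the file, so it needs a powerpc target to build):

    typedef unsigned int USItype;

    static USItype add_sketch(USItype a, USItype b)
    {
            USItype sum;

            /* accepted by gcc and clang: plain lvalue output, cast inputs */
            __asm__ ("add %0,%1,%2"
                     : "=r" (sum)
                     : "r" ((USItype)(a)), "r" ((USItype)(b)));
            return sum;
    }

    /* the removed form, which clang refuses:
     *
     *      __asm__ ("add %0,%1,%2"
     *               : "=r" ((USItype)(sum))    <- cast is not an lvalue
     *               : "r" (a), "r" (b));
     */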