powerpc/mm: PTE_ATOMIC_UPDATES is only for 40x
author	Christophe Leroy <christophe.leroy@csgroup.eu>
	Tue, 19 May 2020 05:49:00 +0000 (05:49 +0000)
committer	Michael Ellerman <mpe@ellerman.id.au>
	Tue, 26 May 2020 12:22:20 +0000 (22:22 +1000)
Only 40x still uses PTE_ATOMIC_UPDATES.
40x cannot select CONFIG_PTE_64BIT.

Drop handling of PTE_ATOMIC_UPDATES:
- In nohash/64
- In nohash/32 for CONFIG_PTE_64BIT

Keep PTE_ATOMIC_UPDATES only in nohash/32 for !CONFIG_PTE_64BIT.
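
For reference, the path that keeps PTE_ATOMIC_UPDATES is the nohash/32
pte_update() for !CONFIG_PTE_64BIT, which 40x continues to use. Below is a
minimal sketch of that retained variant, modelled on the lwarx/stwcx.
sequence being removed from the 64-bit-PTE variant further down (the 44x
icache handling and exact upstream wording are omitted):

static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	/* Atomic read-modify-write of the PTE word (needed on 40x) */
	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	/* Plain read-modify-write; callers hold the PTE lock */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

	return old;
}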

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d6f8e1f46583f1842de24581a68b0496feb15516.1589866984.git.christophe.leroy@csgroup.eu
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable.h

diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 4315d40..7e908a1 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -262,25 +262,8 @@ static inline unsigned long long pte_update(pte_t *p,
                                            unsigned long clr,
                                            unsigned long set)
 {
-#ifdef PTE_ATOMIC_UPDATES
-       unsigned long long old;
-       unsigned long tmp;
-
-       __asm__ __volatile__("\
-1:     lwarx   %L0,0,%4\n\
-       lwzx    %0,0,%3\n\
-       andc    %1,%L0,%5\n\
-       or      %1,%1,%6\n"
-       PPC405_ERR77(0,%3)
-"      stwcx.  %1,0,%4\n\
-       bne-    1b"
-       : "=&r" (old), "=&r" (tmp), "=m" (*p)
-       : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
-       : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
        unsigned long long old = pte_val(*p);
        *p = __pte((old & ~(unsigned long long)clr) | set);
-#endif /* !PTE_ATOMIC_UPDATES */
 
 #ifdef CONFIG_44x
        if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 9a33b8b..9c703b1 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -211,22 +211,9 @@ static inline unsigned long pte_update(struct mm_struct *mm,
                                       unsigned long set,
                                       int huge)
 {
-#ifdef PTE_ATOMIC_UPDATES
-       unsigned long old, tmp;
-
-       __asm__ __volatile__(
-       "1:     ldarx   %0,0,%3         # pte_update\n\
-       andc    %1,%0,%4 \n\
-       or      %1,%1,%6\n\
-       stdcx.  %1,0,%3 \n\
-       bne-    1b"
-       : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
-       : "r" (ptep), "r" (clr), "m" (*ptep), "r" (set)
-       : "cc" );
-#else
        unsigned long old = pte_val(*ptep);
        *ptep = __pte((old & ~clr) | set);
-#endif
+
        /* huge pages use the old page table lock */
        if (!huge)
                assert_pte_locked(mm, addr);
@@ -310,21 +297,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
        unsigned long bits = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
-#ifdef PTE_ATOMIC_UPDATES
-       unsigned long old, tmp;
-
-       __asm__ __volatile__(
-       "1:     ldarx   %0,0,%4\n\
-               or      %0,%3,%0\n\
-               stdcx.  %0,0,%4\n\
-               bne-    1b"
-       :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
-       :"r" (bits), "r" (ptep), "m" (*ptep)
-       :"cc");
-#else
        unsigned long old = pte_val(*ptep);
        *ptep = __pte(old | bits);
-#endif
 
        flush_tlb_page(vma, address);
 }
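
With the inline asm gone, the nohash/64 pte_update() is a plain
read-modify-write, which is safe because its callers run under the page
table lock (the assert_pte_locked() check above enforces this for non-huge
mappings). As a usage illustration, here is a paraphrased sketch of a
typical caller, ptep_get_and_clear(), which clears every bit and hands back
the old PTE:

static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	/* Clear all PTE bits; pte_update() returns the previous value */
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);

	return __pte(old);
}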