/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_SYNCH_H
#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__

#include <asm/cputable.h>
#include <asm/feature-fixups.h>
#include <asm/ppc-opcode.h>

#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
			     void *fixup_end);

static inline void eieio(void)
{
	__asm__ __volatile__ ("eieio" : : : "memory");
}

static inline void isync(void)
{
	__asm__ __volatile__ ("isync" : : : "memory");
}

static inline void ppc_after_tlbiel_barrier(void)
{
	asm volatile("ptesync": : :"memory");
	/*
	 * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is
	 * invalidated correctly. If this is not done, the paste can take data
	 * from the physical address that was translated at copy time.
	 *
	 * POWER9 in practice does not need this, because address spaces with
	 * accelerators mapped will use tlbie (which does invalidate the copy)
	 * to invalidate translations. It's not possible to limit POWER10 this
	 * way due to local copy-paste.
	 */
	asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory");
}
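
/*
 * Illustrative sketch, not part of this header: ppc_after_tlbiel_barrier()
 * is meant to run once after a batch of local (tlbiel) invalidations, so the
 * ptesync orders the invalidations and the conditional cp_abort clears any
 * stale copy-paste state on ISA v3.1 CPUs.  A hypothetical caller could look
 * like this (example_local_invalidate() and "rb" are placeholders, and the
 * tlbiel operand encoding is elided):
 *
 *	static inline void example_local_invalidate(unsigned long rb)
 *	{
 *		asm volatile("tlbiel %0" : : "r" (rb) : "memory");
 *		ppc_after_tlbiel_barrier();
 *	}
 */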
#endif /* __ASSEMBLY__ */

#if defined(__powerpc64__)
#    define LWSYNC	lwsync
#elif defined(CONFIG_E500)
#    define LWSYNC					\
	START_LWSYNC_SECTION(96);			\
	sync;						\
	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
#else
#    define LWSYNC	sync
#endif

#ifdef CONFIG_SMP
#define __PPC_ACQUIRE_BARRIER				\
	START_LWSYNC_SECTION(97);			\
	isync;						\
	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
#define PPC_ACQUIRE_BARRIER	"\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
#define PPC_RELEASE_BARRIER	stringify_in_c(LWSYNC) "\n"
#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
#define PPC_ATOMIC_EXIT_BARRIER	"\n" stringify_in_c(sync) "\n"
#else
#define PPC_ACQUIRE_BARRIER
#define PPC_RELEASE_BARRIER
#define PPC_ATOMIC_ENTRY_BARRIER
#define PPC_ATOMIC_EXIT_BARRIER
#endif /* CONFIG_SMP */
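
/*
 * Illustrative sketch, not part of this header: the barrier strings above are
 * designed to be string-pasted into inline asm around larx/stcx. sequences,
 * with PPC_ATOMIC_ENTRY_BARRIER before the larx and PPC_ATOMIC_EXIT_BARRIER
 * (or PPC_ACQUIRE_BARRIER) after the final bne-.  A hypothetical
 * fully-ordered add-and-return, modelled on the powerpc atomic helpers, might
 * look like this (example_add_return() is a placeholder name):
 *
 *	static inline int example_add_return(int a, int *p)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *		PPC_ATOMIC_ENTRY_BARRIER
 *	"1:	lwarx	%0,0,%2\n"
 *	"	add	%0,%1,%0\n"
 *	"	stwcx.	%0,0,%2\n"
 *	"	bne-	1b"
 *		PPC_ATOMIC_EXIT_BARRIER
 *		: "=&r" (t)
 *		: "r" (a), "r" (p)
 *		: "cc", "memory");
 *
 *		return t;
 *	}
 */
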
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYNCH_H */