/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>
#include <asm/sync.h>

/* Emit an unconditional full completion barrier (SYNC). */
static inline void __sync(void)
{
	asm volatile(__SYNC(full, always) ::: "memory");
}

/* Read memory barrier: order loads issued before it against loads after it. */
static inline void rmb(void)
{
	asm volatile(__SYNC(rmb, always) ::: "memory");
}
#define rmb rmb

/* Write memory barrier: order stores issued before it against stores after it. */
static inline void wmb(void)
{
	asm volatile(__SYNC(wmb, always) ::: "memory");
}
#define wmb wmb

#define fast_mb()	__sync()

#define __fast_iob()						\
	__asm__ __volatile__(					\
		".set	push\n\t"				\
		".set	noreorder\n\t"				\
		"lw	$0,%0\n\t"				\
		"nop\n\t"					\
		".set	pop"					\
		: /* no output */				\
		: "m" (*(int *)CKSEG1)				\
		: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# ifdef CONFIG_SGI_IP28
#  define fast_iob()						\
	__asm__ __volatile__(					\
		".set	push\n\t"				\
		".set	noreorder\n\t"				\
		"lw	$0,%0\n\t"				\
		"sync\n\t"					\
		"lw	$0,%0\n\t"				\
		".set	pop"					\
		: /* no output */				\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004))		\
		: "memory")
# else
#  define fast_iob()						\
	do {							\
		__sync();					\
		__fast_iob();					\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#if defined(CONFIG_WEAK_ORDERING)
# define __smp_mb()	__sync()
# define __smp_rmb()	rmb()
# define __smp_wmb()	wmb()
#else
# define __smp_mb()	barrier()
# define __smp_rmb()	barrier()
# define __smp_wmb()	barrier()
#endif

/*
 * When LL/SC does imply order, it must also be a compiler barrier to prevent
 * the compiler from reordering where the CPU will not. When it does not imply
 * order, the compiler is also free to reorder across the LL/SC loop and
 * ordering will be done by smp_llsc_mb() and friends.
 */
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"	sync	\n"
#define smp_llsc_mb()		__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#define __LLSC_CLOBBER
#else
#define __WEAK_LLSC_MB		"		\n"
#define smp_llsc_mb()		do { } while (0)
#define __LLSC_CLOBBER		"memory"
#endif

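/*
 * Illustrative sketch only, not part of this header: roughly how the macros
 * above are consumed by an LL/SC retry loop such as those in the MIPS
 * atomic/cmpxchg implementations. The operands "temp", "v" and "i" are
 * placeholders for this example.
 *
 *	__asm__ __volatile__(
 *	"1:	ll	%0, %1		\n"
 *	"	addu	%0, %2		\n"
 *	"	sc	%0, %1		\n"
 *	"	beqz	%0, 1b		\n"
 *	: "=&r" (temp), "+m" (v->counter)
 *	: "Ir" (i)
 *	: __LLSC_CLOBBER);
 *	smp_llsc_mb();
 *
 * When ordering is weak beyond LL/SC, smp_llsc_mb() emits a SYNC (and
 * __WEAK_LLSC_MB is the equivalent string for use inside asm templates);
 * otherwise both are empty and __LLSC_CLOBBER supplies the "memory" clobber
 * so the compiler does not reorder around the sequence.
 */
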
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

#define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#define __smp_mb__after_atomic()	smp_llsc_mb()

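/*
 * Illustrative sketch only, not part of this header: callers reach the two
 * definitions above through the generic smp_mb__before_atomic() and
 * smp_mb__after_atomic() wrappers to order plain accesses against a
 * non-value-returning atomic op (field names here are hypothetical):
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();
 *	atomic_dec(&obj->ref_count);
 */
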
/*
 * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
 * store or prefetch) in between an LL & SC can cause the SC instruction to
 * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
 * containing such sequences, this bug bites harder than we might otherwise
 * expect due to reordering & speculation:
 *
 * 1) A memory access appearing prior to the LL in program order may actually
 *    be executed after the LL - this is the reordering case.
 *
 *    In order to avoid this we need to place a memory barrier (ie. a SYNC
 *    instruction) prior to every LL instruction, in between it and any earlier
 *    memory access instructions.
 *
 *    This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
 *
 * 2) If a conditional branch exists between an LL & SC with a target outside
 *    of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
 *    or similar, then misprediction of the branch may allow speculative
 *    execution of memory accesses from outside of the LL-SC loop.
 *
 *    In order to avoid this we need a memory barrier (ie. a SYNC instruction)
 *    at each affected branch target, for which we also use loongson_llsc_mb()
 *    defined below.
 *
 *    This case affects all current Loongson 3 CPUs.
 *
 * The above described cases cause an error in the cache coherence protocol,
 * such that the Invalidate of a competing LL-SC goes 'missing' and the SC
 * erroneously observes its core still has Exclusive state and lets the SC
 * proceed.
 *
 * Therefore the error only occurs on SMP systems.
 */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
#define loongson_llsc_mb()	__asm__ __volatile__("sync" : : :"memory")
#else
#define loongson_llsc_mb()	do { } while (0)
#endif

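/*
 * Illustrative sketch only, not part of this header: where loongson_llsc_mb()
 * sits relative to an LL/SC sequence to address the two cases described
 * above - a barrier before the LL (case 1) and a SYNC at the branch target
 * outside the loop (case 2). The loop body below is a placeholder, loosely
 * modelled on a cmpxchg()-style sequence.
 *
 *	loongson_llsc_mb();
 *	__asm__ __volatile__(
 *	"1:	ll	%0, %1		\n"
 *	"	bne	%0, %2, 2f	\n"
 *	"	move	%0, %3		\n"
 *	"	sc	%0, %1		\n"
 *	"	beqz	%0, 1b		\n"
 *	"2:	sync			\n"
 *	...);
 */
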
/* Ensure completion of any preceding GINV (global invalidate) operations. */
static inline void sync_ginv(void)
{
	asm volatile(__SYNC(ginv, always));
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */