/*
 * Copyright (C) 2013 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <linux/preempt.h>

#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>
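
/*
 * The per-CPU offset for the current CPU lives in TPIDR_EL1, or in
 * TPIDR_EL2 when the kernel runs at EL2 with VHE (ARM64_HAS_VIRT_HOST_EXTN),
 * so reading it is a single system register access.
 */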
static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
				 "msr tpidr_el2, %0",
				 ARM64_HAS_VIRT_HOST_EXTN)
			:: "r" (off) : "memory");
}

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm(ALTERNATIVE("mrs %0, tpidr_el1",
			"mrs %0, tpidr_el2",
			ARM64_HAS_VIRT_HOST_EXTN)
		: "=r" (off) :
		"Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
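
/*
 * PERCPU_RW_OPS() generates the raw __percpu_read_<sz>()/__percpu_write_<sz>()
 * helpers for each access size; the this_cpu_* accessors further down wrap
 * them with preemption disabled.
 */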
#define PERCPU_RW_OPS(sz)						\
static inline unsigned long __percpu_read_##sz(void *ptr)		\
{									\
	return READ_ONCE(*(u##sz *)ptr);				\
}									\
									\
static inline void __percpu_write_##sz(void *ptr, unsigned long val)	\
{									\
	WRITE_ONCE(*(u##sz *)ptr, (u##sz)val);				\
}
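
/*
 * __PERCPU_OP_CASE() emits a non-returning read-modify-write on a per-CPU
 * variable, using an LL/SC loop by default and a single LSE atomic when the
 * CPU supports it (patched at runtime via ARM64_LSE_ATOMIC_INSN).
 */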
#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)		\
static inline void							\
__percpu_##name##_case_##sz(void *ptr, unsigned long val)		\
{									\
	unsigned int loop;						\
	u##sz tmp;							\
									\
	asm volatile (ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"1:	ldxr" #sfx "\t%" #w "[tmp], %[ptr]\n"			\
		#op_llsc "\t%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n"		\
	"	cbnz	%w[loop], 1b",					\
	/* LSE atomics */						\
		#op_lse "\t%" #w "[val], %[ptr]\n"			\
		__nops(3))						\
	: [loop] "=&r" (loop), [tmp] "=&r" (tmp),			\
	  [ptr] "+Q"(*(u##sz *)ptr)					\
	: [val] "r" ((u##sz)(val)));					\
}
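
/*
 * __PERCPU_RET_OP_CASE() is the value-returning variant: the LSE path loads
 * the old value with the atomic (e.g. ldadd) and then re-applies the op to
 * compute the new value that is returned.
 */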
#define __PERCPU_RET_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)	\
static inline u##sz							\
__percpu_##name##_return_case_##sz(void *ptr, unsigned long val)	\
{									\
	unsigned int loop;						\
	u##sz ret;							\
									\
	asm volatile (ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"1:	ldxr" #sfx "\t%" #w "[ret], %[ptr]\n"			\
		#op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n"	\
	"	stxr" #sfx "\t%w[loop], %" #w "[ret], %[ptr]\n"		\
	"	cbnz	%w[loop], 1b",					\
	/* LSE atomics */						\
		#op_lse "\t%" #w "[val], %" #w "[ret], %[ptr]\n"	\
		#op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n"	\
		__nops(2))						\
	: [loop] "=&r" (loop), [ret] "=&r" (ret),			\
	  [ptr] "+Q"(*(u##sz *)ptr)					\
	: [val] "r" ((u##sz)(val)));					\
									\
	return ret;							\
}
#define PERCPU_OP(name, op_llsc, op_lse)				\
	__PERCPU_OP_CASE(w, b, name,  8, op_llsc, op_lse)		\
	__PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse)		\
	__PERCPU_OP_CASE(w,  , name, 32, op_llsc, op_lse)		\
	__PERCPU_OP_CASE( ,  , name, 64, op_llsc, op_lse)

#define PERCPU_RET_OP(name, op_llsc, op_lse)				\
	__PERCPU_RET_OP_CASE(w, b, name,  8, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE(w,  , name, 32, op_llsc, op_lse)		\
	__PERCPU_RET_OP_CASE( ,  , name, 64, op_llsc, op_lse)
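
/*
 * Instantiate the raw read/write helpers and the add/andnot/or operations
 * for every access size, e.g. __percpu_read_32(), __percpu_add_case_64()
 * and __percpu_add_return_case_64().
 */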
PERCPU_RW_OPS(8)
PERCPU_RW_OPS(16)
PERCPU_RW_OPS(32)
PERCPU_RW_OPS(64)
PERCPU_OP(add, add, stadd)
PERCPU_OP(andnot, bic, stclr)
PERCPU_OP(or, orr, stset)
PERCPU_RET_OP(add, add, ldadd)

#undef PERCPU_RW_OPS
#undef __PERCPU_OP_CASE
#undef __PERCPU_RET_OP_CASE
#undef PERCPU_OP
#undef PERCPU_RET_OP
/*
 * It would be nice to avoid the conditional call into the scheduler when
 * re-enabling preemption for preemptible kernels, but doing that in a way
 * which builds inside a module would mean messing directly with the preempt
 * count. If you do this, peterz and tglx will hunt you down.
 */
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable_notrace();					\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable_notrace();					\
	__ret;								\
})
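
/*
 * _pcp_protect()/_pcp_protect_return() run a raw per-CPU op with preemption
 * disabled so the task cannot migrate to another CPU mid-operation;
 * raw_cpu_ptr() resolves the per-CPU address for the current CPU.
 */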
#define _pcp_protect(op, pcp, ...)					\
({									\
	preempt_disable_notrace();					\
	op(raw_cpu_ptr(&(pcp)), __VA_ARGS__);				\
	preempt_enable_notrace();					\
})

#define _pcp_protect_return(op, pcp, args...)				\
({									\
	typeof(pcp) __retval;						\
	preempt_disable_notrace();					\
	__retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);	\
	preempt_enable_notrace();					\
	__retval;							\
})
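
/*
 * The generic this_cpu_* API dispatches on operand size, so each operation
 * is provided in _1/_2/_4/_8 flavours below.
 */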
#define this_cpu_read_1(pcp)		\
	_pcp_protect_return(__percpu_read_8, pcp)
#define this_cpu_read_2(pcp)		\
	_pcp_protect_return(__percpu_read_16, pcp)
#define this_cpu_read_4(pcp)		\
	_pcp_protect_return(__percpu_read_32, pcp)
#define this_cpu_read_8(pcp)		\
	_pcp_protect_return(__percpu_read_64, pcp)

#define this_cpu_write_1(pcp, val)	\
	_pcp_protect(__percpu_write_8, pcp, (unsigned long)val)
#define this_cpu_write_2(pcp, val)	\
	_pcp_protect(__percpu_write_16, pcp, (unsigned long)val)
#define this_cpu_write_4(pcp, val)	\
	_pcp_protect(__percpu_write_32, pcp, (unsigned long)val)
#define this_cpu_write_8(pcp, val)	\
	_pcp_protect(__percpu_write_64, pcp, (unsigned long)val)

#define this_cpu_add_1(pcp, val)	\
	_pcp_protect(__percpu_add_case_8, pcp, val)
#define this_cpu_add_2(pcp, val)	\
	_pcp_protect(__percpu_add_case_16, pcp, val)
#define this_cpu_add_4(pcp, val)	\
	_pcp_protect(__percpu_add_case_32, pcp, val)
#define this_cpu_add_8(pcp, val)	\
	_pcp_protect(__percpu_add_case_64, pcp, val)

#define this_cpu_add_return_1(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_8, pcp, val)
#define this_cpu_add_return_2(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_16, pcp, val)
#define this_cpu_add_return_4(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_32, pcp, val)
#define this_cpu_add_return_8(pcp, val)	\
	_pcp_protect_return(__percpu_add_return_case_64, pcp, val)

#define this_cpu_and_1(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_8, pcp, ~val)
#define this_cpu_and_2(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_16, pcp, ~val)
#define this_cpu_and_4(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_32, pcp, ~val)
#define this_cpu_and_8(pcp, val)	\
	_pcp_protect(__percpu_andnot_case_64, pcp, ~val)

#define this_cpu_or_1(pcp, val)		\
	_pcp_protect(__percpu_or_case_8, pcp, val)
#define this_cpu_or_2(pcp, val)		\
	_pcp_protect(__percpu_or_case_16, pcp, val)
#define this_cpu_or_4(pcp, val)		\
	_pcp_protect(__percpu_or_case_32, pcp, val)
#define this_cpu_or_8(pcp, val)		\
	_pcp_protect(__percpu_or_case_64, pcp, val)

#define this_cpu_xchg_1(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_2(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_4(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_8(pcp, val)	\
	_pcp_protect_return(xchg_relaxed, pcp, val)

#define this_cpu_cmpxchg_1(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_2(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_4(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_8(pcp, o, n)	\
	_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
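
/*
 * Usage sketch (illustrative only; "pkt_count" is a made-up variable, not
 * part of this header):
 *
 *	DEFINE_PER_CPU(unsigned long, pkt_count);
 *
 *	this_cpu_add(pkt_count, 1);	 (ends up in this_cpu_add_8())
 *	n = this_cpu_read(pkt_count);	 (ends up in this_cpu_read_8())
 */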
#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */