arch/arm64/include/asm/percpu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 ARM Ltd.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <linux/preempt.h>

#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>

static inline void set_my_cpu_offset(unsigned long off)
{
        asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
                                 "msr tpidr_el2, %0",
                                 ARM64_HAS_VIRT_HOST_EXTN)
                        :: "r" (off) : "memory");
}

static inline unsigned long __hyp_my_cpu_offset(void)
{
        /*
         * Non-VHE hyp code runs with preemption disabled. No need to hazard
         * the register access against barrier() as in __kern_my_cpu_offset.
         */
        return read_sysreg(tpidr_el2);
}

static inline unsigned long __kern_my_cpu_offset(void)
{
        unsigned long off;

        /*
         * We want to allow caching the value, so avoid using volatile and
         * instead use a fake stack read to hazard against barrier().
         */
        asm(ALTERNATIVE("mrs %0, tpidr_el1",
                        "mrs %0, tpidr_el2",
                        ARM64_HAS_VIRT_HOST_EXTN)
                : "=r" (off) :
                "Q" (*(const unsigned long *)current_stack_pointer));

        return off;
}

#ifdef __KVM_NVHE_HYPERVISOR__
#define __my_cpu_offset __hyp_my_cpu_offset()
#else
#define __my_cpu_offset __kern_my_cpu_offset()
#endif

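/*
 * __my_cpu_offset is the hook consumed by asm-generic/percpu.h: when an
 * architecture defines it, the generic layer uses it as my_cpu_offset to
 * turn a per-cpu symbol's address into this CPU's instance for
 * this_cpu_ptr() and the accessors defined below.
 */
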
#define PERCPU_RW_OPS(sz)                                               \
static inline unsigned long __percpu_read_##sz(void *ptr)              \
{                                                                       \
        return READ_ONCE(*(u##sz *)ptr);                                \
}                                                                       \
                                                                        \
static inline void __percpu_write_##sz(void *ptr, unsigned long val)   \
{                                                                       \
        WRITE_ONCE(*(u##sz *)ptr, (u##sz)val);                          \
}

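/*
 * For reference, PERCPU_RW_OPS(8) above expands to roughly:
 *
 *      static inline unsigned long __percpu_read_8(void *ptr)
 *      {
 *              return READ_ONCE(*(u8 *)ptr);
 *      }
 *
 *      static inline void __percpu_write_8(void *ptr, unsigned long val)
 *      {
 *              WRITE_ONCE(*(u8 *)ptr, (u8)val);
 *      }
 *
 * i.e. plain single-copy-atomic loads and stores; no LL/SC or LSE sequence
 * is needed just to read or overwrite this CPU's copy.
 */
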
#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)             \
static inline void                                                      \
__percpu_##name##_case_##sz(void *ptr, unsigned long val)               \
{                                                                       \
        unsigned int loop;                                              \
        u##sz tmp;                                                      \
                                                                        \
        asm volatile (ARM64_LSE_ATOMIC_INSN(                            \
        /* LL/SC */                                                     \
        "1:     ldxr" #sfx "\t%" #w "[tmp], %[ptr]\n"                   \
                #op_llsc "\t%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
        "       stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n"         \
        "       cbnz    %w[loop], 1b",                                  \
        /* LSE atomics */                                               \
                #op_lse "\t%" #w "[val], %[ptr]\n"                      \
                __nops(3))                                              \
        : [loop] "=&r" (loop), [tmp] "=&r" (tmp),                       \
          [ptr] "+Q"(*(u##sz *)ptr)                                     \
        : [val] "r" ((u##sz)(val)));                                    \
}

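/*
 * Each op generated above is one ARM64_LSE_ATOMIC_INSN alternative: CPUs
 * without LSE run the four-instruction LL/SC retry loop (ldxr, op, stxr,
 * cbnz), while LSE-capable CPUs have it patched to a single atomic
 * store-op (stadd/stclr/stset) padded with __nops(3) so both sequences
 * are the same size. The LL/SC body of __percpu_add_case_32(), for
 * example, is roughly:
 *
 *      1:      ldxr    w_tmp, [ptr]
 *              add     w_tmp, w_tmp, w_val
 *              stxr    w_loop, w_tmp, [ptr]
 *              cbnz    w_loop, 1b
 *
 * and the LSE body is just "stadd w_val, [ptr]".
 */
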
#define __PERCPU_RET_OP_CASE(w, sfx, name, sz, op_llsc, op_lse)         \
static inline u##sz                                                     \
__percpu_##name##_return_case_##sz(void *ptr, unsigned long val)        \
{                                                                       \
        unsigned int loop;                                              \
        u##sz ret;                                                      \
                                                                        \
        asm volatile (ARM64_LSE_ATOMIC_INSN(                            \
        /* LL/SC */                                                     \
        "1:     ldxr" #sfx "\t%" #w "[ret], %[ptr]\n"                   \
                #op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n" \
        "       stxr" #sfx "\t%w[loop], %" #w "[ret], %[ptr]\n"         \
        "       cbnz    %w[loop], 1b",                                  \
        /* LSE atomics */                                               \
                #op_lse "\t%" #w "[val], %" #w "[ret], %[ptr]\n"        \
                #op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n" \
                __nops(2))                                              \
        : [loop] "=&r" (loop), [ret] "=&r" (ret),                       \
          [ptr] "+Q"(*(u##sz *)ptr)                                     \
        : [val] "r" ((u##sz)(val)));                                    \
                                                                        \
        return ret;                                                     \
}

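/*
 * The *_return variants must hand back the new value. The LL/SC loop
 * already has it in [ret]; on the LSE side the load-op (e.g. ldadd)
 * returns the *old* value into [ret], so the op_llsc instruction is
 * reused afterwards to recompute old-op-val in a register. Only
 * PERCPU_RET_OP(add, ...) is instantiated below, giving
 * __percpu_add_return_case_{8,16,32,64}().
 */
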
#define PERCPU_OP(name, op_llsc, op_lse)                                \
        __PERCPU_OP_CASE(w, b, name,  8, op_llsc, op_lse)               \
        __PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse)               \
        __PERCPU_OP_CASE(w,  , name, 32, op_llsc, op_lse)               \
        __PERCPU_OP_CASE( ,  , name, 64, op_llsc, op_lse)

#define PERCPU_RET_OP(name, op_llsc, op_lse)                            \
        __PERCPU_RET_OP_CASE(w, b, name,  8, op_llsc, op_lse)           \
        __PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse)           \
        __PERCPU_RET_OP_CASE(w,  , name, 32, op_llsc, op_lse)           \
        __PERCPU_RET_OP_CASE( ,  , name, 64, op_llsc, op_lse)

PERCPU_RW_OPS(8)
PERCPU_RW_OPS(16)
PERCPU_RW_OPS(32)
PERCPU_RW_OPS(64)
PERCPU_OP(add, add, stadd)
PERCPU_OP(andnot, bic, stclr)
PERCPU_OP(or, orr, stset)
PERCPU_RET_OP(add, add, ldadd)

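/*
 * The instantiations above provide, for each of the 8/16/32/64-bit sizes:
 * __percpu_read_N(), __percpu_write_N(), __percpu_add_case_N(),
 * __percpu_andnot_case_N(), __percpu_or_case_N() and
 * __percpu_add_return_case_N(). These back the this_cpu_* wrappers further
 * down; the generator macros are not needed beyond this point and are
 * undefined below.
 */
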
#undef PERCPU_RW_OPS
#undef __PERCPU_OP_CASE
#undef __PERCPU_RET_OP_CASE
#undef PERCPU_OP
#undef PERCPU_RET_OP

/*
 * It would be nice to avoid the conditional call into the scheduler when
 * re-enabling preemption for preemptible kernels, but doing that in a way
 * which builds inside a module would mean messing directly with the preempt
 * count. If you do this, peterz and tglx will hunt you down.
 */
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)           \
({                                                                      \
        int __ret;                                                      \
        preempt_disable_notrace();                                      \
        __ret = cmpxchg_double_local(   raw_cpu_ptr(&(ptr1)),           \
                                        raw_cpu_ptr(&(ptr2)),           \
                                        o1, o2, n1, n2);                \
        preempt_enable_notrace();                                       \
        __ret;                                                          \
})

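/*
 * Note: this compares and swaps two adjacent 64-bit per-cpu words in one
 * operation. As with the generic cmpxchg-double interface, the two
 * variables are expected to be contiguous and suitably aligned as a pair.
 */
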
#define _pcp_protect(op, pcp, ...)                                      \
({                                                                      \
        preempt_disable_notrace();                                      \
        op(raw_cpu_ptr(&(pcp)), __VA_ARGS__);                           \
        preempt_enable_notrace();                                       \
})

#define _pcp_protect_return(op, pcp, args...)                           \
({                                                                      \
        typeof(pcp) __retval;                                           \
        preempt_disable_notrace();                                      \
        __retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);        \
        preempt_enable_notrace();                                       \
        __retval;                                                       \
})

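/*
 * Both wrappers simply pin the task to the current CPU with the notrace
 * preempt helpers around one raw per-cpu operation. For example,
 * this_cpu_add_4(pcp, val) below becomes roughly:
 *
 *      preempt_disable_notrace();
 *      __percpu_add_case_32(raw_cpu_ptr(&(pcp)), val);
 *      preempt_enable_notrace();
 */
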
#define this_cpu_read_1(pcp)            \
        _pcp_protect_return(__percpu_read_8, pcp)
#define this_cpu_read_2(pcp)            \
        _pcp_protect_return(__percpu_read_16, pcp)
#define this_cpu_read_4(pcp)            \
        _pcp_protect_return(__percpu_read_32, pcp)
#define this_cpu_read_8(pcp)            \
        _pcp_protect_return(__percpu_read_64, pcp)

#define this_cpu_write_1(pcp, val)      \
        _pcp_protect(__percpu_write_8, pcp, (unsigned long)val)
#define this_cpu_write_2(pcp, val)      \
        _pcp_protect(__percpu_write_16, pcp, (unsigned long)val)
#define this_cpu_write_4(pcp, val)      \
        _pcp_protect(__percpu_write_32, pcp, (unsigned long)val)
#define this_cpu_write_8(pcp, val)      \
        _pcp_protect(__percpu_write_64, pcp, (unsigned long)val)

#define this_cpu_add_1(pcp, val)        \
        _pcp_protect(__percpu_add_case_8, pcp, val)
#define this_cpu_add_2(pcp, val)        \
        _pcp_protect(__percpu_add_case_16, pcp, val)
#define this_cpu_add_4(pcp, val)        \
        _pcp_protect(__percpu_add_case_32, pcp, val)
#define this_cpu_add_8(pcp, val)        \
        _pcp_protect(__percpu_add_case_64, pcp, val)

#define this_cpu_add_return_1(pcp, val) \
        _pcp_protect_return(__percpu_add_return_case_8, pcp, val)
#define this_cpu_add_return_2(pcp, val) \
        _pcp_protect_return(__percpu_add_return_case_16, pcp, val)
#define this_cpu_add_return_4(pcp, val) \
        _pcp_protect_return(__percpu_add_return_case_32, pcp, val)
#define this_cpu_add_return_8(pcp, val) \
        _pcp_protect_return(__percpu_add_return_case_64, pcp, val)

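/*
 * LSE has no plain atomic AND store, but it does have stclr (clear the
 * bits set in the source register), mirrored by bic on the LL/SC side.
 * this_cpu_and() is therefore built below from the "andnot" helpers with
 * a complemented mask: clearing ~val is the same as ANDing with val.
 */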
#define this_cpu_and_1(pcp, val)        \
        _pcp_protect(__percpu_andnot_case_8, pcp, ~val)
#define this_cpu_and_2(pcp, val)        \
        _pcp_protect(__percpu_andnot_case_16, pcp, ~val)
#define this_cpu_and_4(pcp, val)        \
        _pcp_protect(__percpu_andnot_case_32, pcp, ~val)
#define this_cpu_and_8(pcp, val)        \
        _pcp_protect(__percpu_andnot_case_64, pcp, ~val)

#define this_cpu_or_1(pcp, val)         \
        _pcp_protect(__percpu_or_case_8, pcp, val)
#define this_cpu_or_2(pcp, val)         \
        _pcp_protect(__percpu_or_case_16, pcp, val)
#define this_cpu_or_4(pcp, val)         \
        _pcp_protect(__percpu_or_case_32, pcp, val)
#define this_cpu_or_8(pcp, val)         \
        _pcp_protect(__percpu_or_case_64, pcp, val)

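/*
 * The xchg/cmpxchg flavours below reuse the generic relaxed primitives
 * rather than bespoke per-cpu assembly; with preemption disabled by
 * _pcp_protect_return() and no ordering guarantees attached to
 * this_cpu_*() operations, the _relaxed forms are sufficient.
 */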
#define this_cpu_xchg_1(pcp, val)       \
        _pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_2(pcp, val)       \
        _pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_4(pcp, val)       \
        _pcp_protect_return(xchg_relaxed, pcp, val)
#define this_cpu_xchg_8(pcp, val)       \
        _pcp_protect_return(xchg_relaxed, pcp, val)

#define this_cpu_cmpxchg_1(pcp, o, n)   \
        _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_2(pcp, o, n)   \
        _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_4(pcp, o, n)   \
        _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg_8(pcp, o, n)   \
        _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)

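/*
 * The nVHE hypervisor has its own per-CPU areas at EL2, with bases set up
 * by KVM rather than taken from the kernel's __per_cpu_offset[] array.
 * Defining __per_cpu_offset (empty) keeps asm-generic/percpu.h from
 * supplying the array-based default, and per_cpu_offset() is routed to
 * __hyp_per_cpu_offset() instead.
 */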
#ifdef __KVM_NVHE_HYPERVISOR__
extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
#define __per_cpu_offset
#define per_cpu_offset(cpu)     __hyp_per_cpu_offset((cpu))
#endif

#include <asm-generic/percpu.h>

/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
#undef  this_cpu_ptr
#define this_cpu_ptr            raw_cpu_ptr
#undef  __this_cpu_read
#define __this_cpu_read         raw_cpu_read
#undef  __this_cpu_write
#define __this_cpu_write        raw_cpu_write
#endif

#endif /* __ASM_PERCPU_H */