include/linux/jump_label.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The use of 'struct static_key' directly is now DEPRECATED. In addition,
 * static_key_{true,false}() is also DEPRECATED, i.e. DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))", in which case we will generate an
 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
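 *
 * A minimal usage sketch (illustrative only; "my_key", my_hot_path() and
 * do_rare_thing() are hypothetical names, not part of this API):
 *
 *	DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_key))
 *			do_rare_thing();	// out of line; a NOP until enabled
 *	}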
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we run-time modify the branch target via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
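 *
 * Continuing the sketch above (still illustrative), the branch is flipped at
 * runtime with:
 *
 *	static_branch_enable(&my_key);	// patch NOP -> JMP at the call site
 *	...
 *	static_branch_disable(&my_key);	// patch JMP -> NOP again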
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
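 *
 * For instance (again purely illustrative, as an alternative to the
 * enable/disable snippet above), two independent users can pin the branch
 * true; it only reverts once both have dropped their reference:
 *
 *	static_branch_inc(&my_key);	// count 0 -> 1, branch becomes true
 *	static_branch_inc(&my_key);	// count 1 -> 2, no code change
 *	static_branch_dec(&my_key);	// count 2 -> 1, still true
 *	static_branch_dec(&my_key);	// count 1 -> 0, branch false again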
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When the control is directly exposed to userspace, it is prudent to delay the
 * decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
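 *
 * A hedged sketch of that deferred form ("my_def_key" is hypothetical; the
 * deferred declarations live outside this header):
 *
 *	static struct static_key_deferred my_def_key;
 *	...
 *	static_key_slow_dec_deferred(&my_def_key);	// decrement is delayed/batched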
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
 *
 * Additional babbling in: Documentation/staging/static-keys.rst
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,               \
				    "%s(): static key '%pS' used before call to jump_label_init()", \
				    __func__, (key))

struct static_key {
	atomic_t enabled;
#ifdef CONFIG_JUMP_LABEL
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *          0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *          0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
#endif	/* CONFIG_JUMP_LABEL */
};

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	/* bits 0-1 of ->key carry the branch/init flags; mask them off */
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}

#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
	if (set)
		entry->key |= 2;
	else
		entry->key &= ~2;
}

static inline int jump_entry_size(struct jump_entry *entry)
{
#ifdef JUMP_LABEL_NOP_SIZE
	return JUMP_LABEL_NOP_SIZE;
#else
	return arch_jump_entry_size(entry);
#endif
}

#endif
#endif

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

struct module;

#ifdef CONFIG_JUMP_LABEL

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
					    enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * the inclusion of atomic.h is problematic for inclusion of jump_label.h
 * in 'low-level' headers. Thus, we are initializing .enabled with a
 * raw value, but have added a BUILD_BUG_ON() to catch any issues in
 * jump_label_init() see: kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE					\
	{ .enabled = { 1 },					\
	  { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE					\
	{ .enabled = { 0 },					\
	  { .type = JUMP_TYPE_FALSE } }

#else  /* !CONFIG_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static __always_inline int static_key_count(struct static_key *key)
{
	return arch_atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static inline void static_key_slow_inc(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_inc(&key->enabled);
}

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}

#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline int jump_label_apply_nops(struct module *mod)
{
	return 0;
}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* CONFIG_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile time
 * type differentiation to emit the right code.
 *
 * All the below code is macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}
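
/*
 * Example of the array form (purely illustrative; "thing_keys" and
 * handle_thing() are hypothetical): an array of 8 initially-false keys,
 * each element usable like an ordinary static_key_false:
 *
 *	DEFINE_STATIC_KEY_ARRAY_FALSE(thing_keys, 8);
 *	...
 *	if (static_branch_unlikely(&thing_keys[i]))
 *		handle_thing(i);
 */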

#define _DEFINE_STATIC_KEY_1(name)	DEFINE_STATIC_KEY_TRUE(name)
#define _DEFINE_STATIC_KEY_0(name)	DEFINE_STATIC_KEY_FALSE(name)
#define DEFINE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)

#define _DEFINE_STATIC_KEY_RO_1(name)	DEFINE_STATIC_KEY_TRUE_RO(name)
#define _DEFINE_STATIC_KEY_RO_0(name)	DEFINE_STATIC_KEY_FALSE_RO(name)
#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)

#define _DECLARE_STATIC_KEY_1(name)	DECLARE_STATIC_KEY_TRUE(name)
#define _DECLARE_STATIC_KEY_0(name)	DECLARE_STATIC_KEY_FALSE(name)
#define DECLARE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)

extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false))	\
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})

#ifdef CONFIG_JUMP_LABEL

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 *
 * type\branch| likely (1)            | unlikely (0)
 * -----------+-----------------------+------------------
 *            |                       |
 *  true (1)  |    ...                |    ...
 *            |    NOP                |    JMP L
 *            |    <br-stmts>         | 1: ...
 *            | L: ...                |
 *            |                       |
 *            |                       | L: <br-stmts>
 *            |                       |    jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *            |                       |
 *  false (0) |    ...                |    ...
 *            |    JMP L              |    NOP
 *            |    <br-stmts>         | 1: ...
 *            | L: ...                |
 *            |                       |
 *            |                       | L: <br-stmts>
 *            |                       |    jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *      enabled type    branch    instruction
 * -----------------------------+-----------
 *      0       0       0       | NOP
 *      0       0       1       | JMP
 *      0       1       0       | NOP
 *      0       1       1       | JMP
 *
 *      1       0       0       | JMP
 *      1       0       1       | NOP
 *      1       1       0       | JMP
 *      1       1       1       | NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */
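
/*
 * Worked example of the table above (illustrative): an initially false key
 * (type 0) used in static_branch_unlikely() (branch 0) starts out as a NOP
 * (enabled ^ branch == 0 ^ 0 == 0); once static_branch_enable() flips enabled
 * to 1, the site is patched to a JMP (1 ^ 0 == 1) into the out-of-line block.
 */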

#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely_notrace(branch);							\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely_notrace(branch);						\
})

#else /* !CONFIG_JUMP_LABEL */

#define static_branch_likely(x)		likely_notrace(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely_notrace(static_key_enabled(&(x)->key))

#endif /* CONFIG_JUMP_LABEL */

#define static_branch_maybe(config, x)					\
	(IS_ENABLED(config) ? static_branch_likely(x)			\
			    : static_branch_unlikely(x))
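
/*
 * Illustrative sketch (CONFIG_FOO and foo_key are hypothetical): with
 * DEFINE_STATIC_KEY_MAYBE(CONFIG_FOO, foo_key), a test site written as
 * static_branch_maybe(CONFIG_FOO, &foo_key) compiles to the likely
 * (default-enabled) form when CONFIG_FOO is set and to the unlikely
 * (default-disabled) form otherwise.
 */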

/*
 * Advanced usage; refcount, branch is enabled when: count != 0
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)

/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)

#endif /* __ASSEMBLY__ */

#endif	/* _LINUX_JUMP_LABEL_H */