kernel/static_call.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/static_call.h>
#include <linux/bug.h>
#include <linux/smp.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/processor.h>
#include <asm/sections.h>

extern struct static_call_site __start_static_call_sites[],
                               __stop_static_call_sites[];

static bool static_call_initialized;

/* mutex to protect key modules/sites */
static DEFINE_MUTEX(static_call_mutex);

static void static_call_lock(void)
{
        mutex_lock(&static_call_mutex);
}

static void static_call_unlock(void)
{
        mutex_unlock(&static_call_mutex);
}

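/*
 * Call sites are emitted into the __static_call_sites section as pairs of
 * self-relative offsets: adding the address of the field itself recovers
 * the absolute pointer to the call instruction and to its key.
 */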
static inline void *static_call_addr(struct static_call_site *site)
{
        return (void *)((long)site->addr + (long)&site->addr);
}

static inline struct static_call_key *static_call_key(const struct static_call_site *site)
{
        return (struct static_call_key *)
                (((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);
}

/* These assume the key is word-aligned. */
static inline bool static_call_is_init(struct static_call_site *site)
{
        return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;
}

static inline bool static_call_is_tail(struct static_call_site *site)
{
        return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;
}

static inline void static_call_set_init(struct static_call_site *site)
{
        site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -
                    (long)&site->key;
}

static int static_call_site_cmp(const void *_a, const void *_b)
{
        const struct static_call_site *a = _a;
        const struct static_call_site *b = _b;
        const struct static_call_key *key_a = static_call_key(a);
        const struct static_call_key *key_b = static_call_key(b);

        if (key_a < key_b)
                return -1;

        if (key_a > key_b)
                return 1;

        return 0;
}

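/*
 * Because each entry stores self-relative offsets, moving an entry during
 * the sort changes what those offsets must be; adjust them by the distance
 * the entry travels (delta) so they keep pointing at the same targets.
 */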
static void static_call_site_swap(void *_a, void *_b, int size)
{
        long delta = (unsigned long)_a - (unsigned long)_b;
        struct static_call_site *a = _a;
        struct static_call_site *b = _b;
        struct static_call_site tmp = *a;

        a->addr = b->addr  - delta;
        a->key  = b->key   - delta;

        b->addr = tmp.addr + delta;
        b->key  = tmp.key  + delta;
}

static inline void static_call_sort_entries(struct static_call_site *start,
                                            struct static_call_site *stop)
{
        sort(start, stop - start, sizeof(struct static_call_site),
             static_call_site_cmp, static_call_site_swap);
}

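/*
 * key->type overlays key->mods and key->sites: bit 0 set means the key
 * holds a direct pointer to its (vmlinux) sites, bit 0 clear means it
 * points to a list of struct static_call_mod entries.
 */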
static inline bool static_call_key_has_mods(struct static_call_key *key)
{
        return !(key->type & 1);
}

static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
{
        if (!static_call_key_has_mods(key))
                return NULL;

        return key->mods;
}

static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
{
        if (static_call_key_has_mods(key))
                return NULL;

        return (struct static_call_site *)(key->type & ~1);
}

void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
        struct static_call_site *site, *stop;
        struct static_call_mod *site_mod, first;

        cpus_read_lock();
        static_call_lock();

        if (key->func == func)
                goto done;

        key->func = func;

        arch_static_call_transform(NULL, tramp, func, false);

        /*
         * If uninitialized, we'll not update the callsites, but they still
         * point to the trampoline and we just patched that.
         */
        if (WARN_ON_ONCE(!static_call_initialized))
                goto done;

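        /*
         * Walk all sites using this key, starting with an on-stack entry
         * that covers any sites stored directly in the key (the vmlinux
         * case); module entries, if any, follow via ->next.
         */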
        first = (struct static_call_mod){
                .next = static_call_key_next(key),
                .mod = NULL,
                .sites = static_call_key_sites(key),
        };

        for (site_mod = &first; site_mod; site_mod = site_mod->next) {
                struct module *mod = site_mod->mod;

                if (!site_mod->sites) {
                        /*
                         * This can happen if the static call key is defined in
                         * a module which doesn't use it.
                         *
                         * It also happens in the has_mods case, where the
                         * 'first' entry has no sites associated with it.
                         */
                        continue;
                }

                stop = __stop_static_call_sites;

#ifdef CONFIG_MODULES
                if (mod) {
                        stop = mod->static_call_sites +
                               mod->num_static_call_sites;
                }
#endif

                for (site = site_mod->sites;
                     site < stop && static_call_key(site) == key; site++) {
                        void *site_addr = static_call_addr(site);

                        if (static_call_is_init(site)) {
                                /*
                                 * Don't write to call sites which were in
                                 * initmem and have since been freed.
                                 */
                                if (!mod && system_state >= SYSTEM_RUNNING)
                                        continue;
                                if (mod && !within_module_init((unsigned long)site_addr, mod))
                                        continue;
                        }

                        if (!kernel_text_address((unsigned long)site_addr)) {
                                WARN_ONCE(1, "can't patch static call site at %pS",
                                          site_addr);
                                continue;
                        }

                        arch_static_call_transform(site_addr, NULL, func,
                                static_call_is_tail(site));
                }
        }

done:
        static_call_unlock();
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(__static_call_update);

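/*
 * Sort the sites in [start, stop) so that all sites sharing a key are
 * contiguous, record them in the key (directly for vmlinux, via a
 * struct static_call_mod for modules), and patch each site to the key's
 * current function.
 */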
static int __static_call_init(struct module *mod,
                              struct static_call_site *start,
                              struct static_call_site *stop)
{
        struct static_call_site *site;
        struct static_call_key *key, *prev_key = NULL;
        struct static_call_mod *site_mod;

        if (start == stop)
                return 0;

        static_call_sort_entries(start, stop);

        for (site = start; site < stop; site++) {
                void *site_addr = static_call_addr(site);

                if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
                    (!mod && init_section_contains(site_addr, 1)))
                        static_call_set_init(site);

                key = static_call_key(site);
                if (key != prev_key) {
                        prev_key = key;

                        /*
                         * For vmlinux (!mod) avoid the allocation by storing
                         * the sites pointer in the key itself. Also see
                         * __static_call_update()'s @first.
                         *
                         * This allows architectures (eg. x86) to call
                         * static_call_init() before memory allocation works.
                         */
                        if (!mod) {
                                key->sites = site;
                                key->type |= 1;
                                goto do_transform;
                        }

                        site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
                        if (!site_mod)
                                return -ENOMEM;

                        /*
                         * When the key has a direct sites pointer, extract
                         * that into an explicit struct static_call_mod, so we
                         * can have a list of modules.
                         */
                        if (static_call_key_sites(key)) {
                                site_mod->mod = NULL;
                                site_mod->next = NULL;
                                site_mod->sites = static_call_key_sites(key);

                                key->mods = site_mod;

                                site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
                                if (!site_mod)
                                        return -ENOMEM;
                        }

                        site_mod->mod = mod;
                        site_mod->sites = site;
                        site_mod->next = static_call_key_next(key);
                        key->mods = site_mod;
                }

do_transform:
                arch_static_call_transform(site_addr, NULL, key->func,
                                static_call_is_tail(site));
        }

        return 0;
}

static int addr_conflict(struct static_call_site *site, void *start, void *end)
{
        unsigned long addr = (unsigned long)static_call_addr(site);

        if (addr <= (unsigned long)end &&
            addr + CALL_INSN_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __static_call_text_reserved(struct static_call_site *iter_start,
                                       struct static_call_site *iter_stop,
                                       void *start, void *end)
{
        struct static_call_site *iter = iter_start;

        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

#ifdef CONFIG_MODULES

static int __static_call_mod_text_reserved(void *start, void *end)
{
        struct module *mod;
        int ret;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        if (!try_module_get(mod))
                mod = NULL;
        preempt_enable();

        if (!mod)
                return 0;

        ret = __static_call_text_reserved(mod->static_call_sites,
                        mod->static_call_sites + mod->num_static_call_sites,
                        start, end);

        module_put(mod);

        return ret;
}

static int static_call_add_module(struct module *mod)
{
        return __static_call_init(mod, mod->static_call_sites,
                                  mod->static_call_sites + mod->num_static_call_sites);
}

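/* Unlink and free the static_call_mod entries this module contributed. */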
static void static_call_del_module(struct module *mod)
{
        struct static_call_site *start = mod->static_call_sites;
        struct static_call_site *stop = mod->static_call_sites +
                                        mod->num_static_call_sites;
        struct static_call_key *key, *prev_key = NULL;
        struct static_call_mod *site_mod, **prev;
        struct static_call_site *site;

        for (site = start; site < stop; site++) {
                key = static_call_key(site);
                if (key == prev_key)
                        continue;

                prev_key = key;

                for (prev = &key->mods, site_mod = key->mods;
                     site_mod && site_mod->mod != mod;
                     prev = &site_mod->next, site_mod = site_mod->next)
                        ;

                if (!site_mod)
                        continue;

                *prev = site_mod->next;
                kfree(site_mod);
        }
}

static int static_call_module_notify(struct notifier_block *nb,
                                     unsigned long val, void *data)
{
        struct module *mod = data;
        int ret = 0;

        cpus_read_lock();
        static_call_lock();

        switch (val) {
        case MODULE_STATE_COMING:
                ret = static_call_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocate memory for static calls");
                        static_call_del_module(mod);
                }
                break;
        case MODULE_STATE_GOING:
                static_call_del_module(mod);
                break;
        }

        static_call_unlock();
        cpus_read_unlock();

        return notifier_from_errno(ret);
}

static struct notifier_block static_call_module_nb = {
        .notifier_call = static_call_module_notify,
};

#else

static inline int __static_call_mod_text_reserved(void *start, void *end)
{
        return 0;
}

#endif /* CONFIG_MODULES */

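/*
 * Report whether [start, end] overlaps a static call site, so that other
 * text-patching users (e.g. kprobes) can avoid clobbering the call
 * instructions this code rewrites.
 */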
int static_call_text_reserved(void *start, void *end)
{
        int ret = __static_call_text_reserved(__start_static_call_sites,
                        __stop_static_call_sites, start, end);

        if (ret)
                return ret;

        return __static_call_mod_text_reserved(start, end);
}

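/*
 * Early boot: patch every vmlinux call site to its key's initial target
 * and, with CONFIG_MODULES, register a notifier to do the same for
 * modules as they are loaded.
 */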
int __init static_call_init(void)
{
        int ret;

        if (static_call_initialized)
                return 0;

        cpus_read_lock();
        static_call_lock();
        ret = __static_call_init(NULL, __start_static_call_sites,
                                 __stop_static_call_sites);
        static_call_unlock();
        cpus_read_unlock();

        if (ret) {
                pr_err("Failed to allocate memory for static_call!\n");
                BUG();
        }

        static_call_initialized = true;

#ifdef CONFIG_MODULES
        register_module_notifier(&static_call_module_nb);
#endif
        return 0;
}
early_initcall(static_call_init);

#ifdef CONFIG_STATIC_CALL_SELFTEST

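/*
 * Boot-time selftest: switch the sc_selftest call between func_a and
 * func_b and check that each invocation reaches the expected target
 * (a NULL ->func entry leaves the previous target in place).
 */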
static int func_a(int x)
{
        return x + 1;
}

static int func_b(int x)
{
        return x + 2;
}

DEFINE_STATIC_CALL(sc_selftest, func_a);

static struct static_call_data {
        int (*func)(int);
        int val;
        int expect;
} static_call_data[] __initdata = {
        { NULL,   2, 3 },
        { func_b, 2, 4 },
        { func_a, 2, 3 }
};

static int __init test_static_call_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(static_call_data); i++) {
                struct static_call_data *scd = &static_call_data[i];

                if (scd->func)
                        static_call_update(sc_selftest, scd->func);

                WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
        }

        return 0;
}
early_initcall(test_static_call_init);

#endif /* CONFIG_STATIC_CALL_SELFTEST */