// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
        ({                                                                     \
                const bool __cond = WARN_ON(cond);                             \
                if (unlikely(__cond))                                          \
                        WRITE_ONCE(kfence_enabled, false);                     \
                __cond;                                                        \
        })

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
        unsigned long num;
        int ret = kstrtoul(val, 0, &num);

        if (ret < 0)
                return ret;

        if (!num) /* Using 0 to indicate KFENCE is disabled. */
                WRITE_ONCE(kfence_enabled, false);
        else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
                return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

        *((unsigned long *)kp->arg) = num;
        return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
        if (!READ_ONCE(kfence_enabled))
                return sprintf(buffer, "0\n");

        return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
        .set = param_set_sample_interval,
        .get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
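
/*
 * Usage notes (illustrative; the standard module_param sysfs path is
 * assumed): the sample interval can be set at boot via
 * "kfence.sample_interval=100" on the kernel command line, or read and
 * written at runtime through /sys/module/kfence/parameters/sample_interval.
 * Per param_set_sample_interval() above, writing 0 disables KFENCE, and
 * attempting to re-enable it after boot returns -EINVAL.
 */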

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
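
/*
 * Sketch of the pool layout (guard pages marked 'x'; addresses increase to
 * the right). Each object page is surrounded by two guard pages, and
 * neighbouring objects share the guard page between them:
 *
 *   ---+-----------+-----------+-----------+-----------+-----------+---
 *      | xxxxxxxxx |  object 0 | xxxxxxxxx |  object 1 | xxxxxxxxx |
 *   ---+-----------+-----------+-----------+-----------+-----------+---
 *        page 1       page 2      page 3      page 4      page 5
 *
 * Page 0 (not shown) is an additional guard page, so that the pool has an
 * even number of pages; see kfence_init_pool() and addr_to_metadata() below.
 */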

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
#endif

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
        KFENCE_COUNTER_ALLOCATED,
        KFENCE_COUNTER_ALLOCS,
        KFENCE_COUNTER_FREES,
        KFENCE_COUNTER_ZOMBIES,
        KFENCE_COUNTER_BUGS,
        KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
        [KFENCE_COUNTER_ALLOCATED]      = "currently allocated",
        [KFENCE_COUNTER_ALLOCS]         = "total allocations",
        [KFENCE_COUNTER_FREES]          = "total frees",
        [KFENCE_COUNTER_ZOMBIES]        = "zombie allocations",
        [KFENCE_COUNTER_BUGS]           = "total bugs",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static bool kfence_protect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
        long index;

        /* The checks do not affect performance; only called from slow-paths. */

        if (!is_kfence_address((void *)addr))
                return NULL;

        /*
         * May be an invalid index if called with an address at the edge of
         * __kfence_pool, in which case we would report an "invalid access"
         * error.
         */
        index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
        if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
                return NULL;

        return &kfence_metadata[index];
}
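
/*
 * Worked example for the index calculation above, assuming PAGE_SIZE == 4096:
 * pages 0 and 1 are guard pages, so object 0's data page starts at
 * __kfence_pool + 2 * PAGE_SIZE. For an address in page 2 or 3,
 * (addr - __kfence_pool) / (2 * PAGE_SIZE) == 1, i.e. index 0; an address in
 * the initial two guard pages yields index -1 and thus NULL.
 */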

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
        unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

        /* The checks do not affect performance; only called from slow-paths. */

        /* Only call with a pointer into kfence_metadata. */
        if (KFENCE_WARN_ON(meta < kfence_metadata ||
                           meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
                return 0;

        /*
         * This metadata object only ever maps to 1 page; verify that the stored
         * address is in the expected range.
         */
        if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
                return 0;

        return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void metadata_update_state(struct kfence_metadata *meta,
                                           enum kfence_object_state next)
{
        struct kfence_track *track =
                next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

        lockdep_assert_held(&meta->lock);

        /*
         * Skip over 1 (this) function; noinline ensures we do not accidentally
         * skip over the caller by never inlining.
         */
        track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
        track->pid = task_pid_nr(current);
        track->cpu = raw_smp_processor_id();
        track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

        /*
         * Pairs with READ_ONCE() in
         *      kfence_shutdown_cache(),
         *      kfence_handle_page_fault().
         */
        WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
        *addr = KFENCE_CANARY_PATTERN(addr);
        return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
        if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
                return true;

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
        kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
                            KFENCE_ERROR_CORRUPTION);
        return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
        const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
        unsigned long addr;

        lockdep_assert_held(&meta->lock);

        /*
         * We'll iterate over each canary byte per-side until fn() returns
         * false. However, we'll still iterate over the canary bytes to the
         * right of the object even if there was an error in the canary bytes to
         * the left of the object. Specifically, if check_canary_byte()
         * generates an error, showing both sides might give more clues as to
         * what the error is about when displaying which bytes were corrupted.
         */

        /* Apply to left of object. */
        for (addr = pageaddr; addr < meta->addr; addr++) {
                if (!fn((u8 *)addr))
                        break;
        }

        /* Apply to right of object. */
        for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
                if (!fn((u8 *)addr))
                        break;
        }
}
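
/*
 * Illustration (a sketch, not part of the algorithm): for an object placed at
 * the start of its page, all bytes from meta->addr + meta->size to the end of
 * the page carry the canary pattern; for a right-aligned object the canary
 * bytes precede meta->addr instead. An off-by-one write within the page, e.g.
 *
 *      char *p = kmalloc(40, GFP_KERNEL);      // assume KFENCE backs this
 *      p[40] = 'x';                            // corrupts a canary byte
 *
 * never touches a protected guard page, but is still caught by
 * check_canary_byte() when the object is freed.
 */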

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
        struct kfence_metadata *meta = NULL;
        unsigned long flags;
        struct page *page;
        void *addr;

        /* Try to obtain a free object. */
        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
        if (!list_empty(&kfence_freelist)) {
                meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
                list_del_init(&meta->list);
        }
        raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
        if (!meta)
                return NULL;

        if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
                /*
                 * This is extremely unlikely -- we are reporting on a
                 * use-after-free, which locked meta->lock, and the reporting
                 * code via printk calls kmalloc() which ends up in
                 * kfence_alloc() and tries to grab the same object that we're
                 * reporting on. While it has never been observed, lockdep does
                 * report that there is a possibility of deadlock. Fix it by
                 * using trylock and bailing out gracefully.
                 */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                /* Put the object back on the freelist. */
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                return NULL;
        }

        meta->addr = metadata_to_pageaddr(meta);
        /* Unprotect if we're reusing this page. */
        if (meta->state == KFENCE_OBJECT_FREED)
                kfence_unprotect(meta->addr);

        /*
         * Note: for allocations made before RNG initialization,
         * prandom_u32_max() will always return zero. We still benefit from
         * enabling KFENCE as early as possible, even when the RNG is not yet
         * available, as this will allow KFENCE to detect bugs due to earlier
         * allocations. The only downside is that the out-of-bounds accesses
         * detected are deterministic for such allocations.
         */
        if (prandom_u32_max(2)) {
                /* Allocate on the "right" side, re-calculate address. */
                meta->addr += PAGE_SIZE - size;
                meta->addr = ALIGN_DOWN(meta->addr, cache->align);
        }

        addr = (void *)meta->addr;

        /* Update remaining metadata. */
        metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
        /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
        WRITE_ONCE(meta->cache, cache);
        meta->size = size;
        for_each_canary(meta, set_canary_byte);

        /* Set required struct page fields. */
        page = virt_to_page(meta->addr);
        page->slab_cache = cache;
        if (IS_ENABLED(CONFIG_SLUB))
                page->objects = 1;
        if (IS_ENABLED(CONFIG_SLAB))
                page->s_mem = addr;

        raw_spin_unlock_irqrestore(&meta->lock, flags);

        /* Memory initialization. */

        /*
         * We check slab_want_init_on_alloc() ourselves, rather than letting
         * SL*B do the initialization, as otherwise we might overwrite KFENCE's
         * redzone.
         */
        if (unlikely(slab_want_init_on_alloc(gfp, cache)))
                memzero_explicit(addr, size);
        if (cache->ctor)
                cache->ctor(addr);

        if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
                kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

        return addr;
}
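
/*
 * Example of the left/right placement above (illustrative numbers): with
 * PAGE_SIZE == 4096, size == 72 and cache->align == 8, a "right" allocation
 * computes meta->addr += 4096 - 72 and rounds down to the 8-byte boundary, so
 * the object ends at (or close to) the end of the page, and an access one
 * byte past the object faults on the adjacent guard page immediately, instead
 * of only being caught via canaries at free time.
 */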

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
        struct kcsan_scoped_access assert_page_exclusive;
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);

        if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
                /* Invalid or double-free, bail out. */
                atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
                kfence_report_error((unsigned long)addr, false, NULL, meta,
                                    KFENCE_ERROR_INVALID_FREE);
                raw_spin_unlock_irqrestore(&meta->lock, flags);
                return;
        }

        /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
        kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
                                  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
                                  &assert_page_exclusive);

        if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
                kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

        /* Restore page protection if there was an OOB access. */
        if (meta->unprotected_page) {
                memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
                kfence_protect(meta->unprotected_page);
                meta->unprotected_page = 0;
        }

        /* Check canary bytes for memory corruption. */
        for_each_canary(meta, check_canary_byte);

        /*
         * Clear memory if init-on-free is set. While we protect the page, the
         * data is still there, and after a use-after-free is detected, we
         * unprotect the page, so the data is still accessible.
         */
        if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
                memzero_explicit(addr, meta->size);

        /* Mark the object as freed. */
        metadata_update_state(meta, KFENCE_OBJECT_FREED);

        raw_spin_unlock_irqrestore(&meta->lock, flags);

        /* Protect to detect use-after-frees. */
        kfence_protect((unsigned long)addr);

        kcsan_end_scoped_access(&assert_page_exclusive);
        if (!zombie) {
                /* Add it to the tail of the freelist for reuse. */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                KFENCE_WARN_ON(!list_empty(&meta->list));
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
                atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
        } else {
                /* See kfence_shutdown_cache(). */
                atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
        }
}

static void rcu_guarded_free(struct rcu_head *h)
{
        struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

        kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
        unsigned long addr = (unsigned long)__kfence_pool;
        struct page *pages;
        int i;

        if (!__kfence_pool)
                return false;

        if (!arch_kfence_init_pool())
                goto err;

        pages = virt_to_page(addr);

        /*
         * Set up object pages: they must have PG_slab set, to avoid freeing
         * these as real pages.
         *
         * We also want to avoid inserting kfence_free() in the kfree()
         * fast-path in SLUB, and therefore need to ensure kfree() correctly
         * enters __slab_free() slow-path.
         */
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                if (!i || (i % 2))
                        continue;

                /* Verify we do not have a compound head page. */
                if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
                        goto err;

                __SetPageSlab(&pages[i]);
        }

        /*
         * Protect the first 2 pages. The first page is mostly unnecessary, and
         * merely serves as an extended guard page. However, adding one
         * additional page in the beginning gives us an even number of pages,
         * which simplifies the mapping of address to metadata index.
         */
        for (i = 0; i < 2; i++) {
                if (unlikely(!kfence_protect(addr)))
                        goto err;

                addr += PAGE_SIZE;
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                struct kfence_metadata *meta = &kfence_metadata[i];

                /* Initialize metadata. */
                INIT_LIST_HEAD(&meta->list);
                raw_spin_lock_init(&meta->lock);
                meta->state = KFENCE_OBJECT_UNUSED;
                meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
                list_add_tail(&meta->list, &kfence_freelist);

                /* Protect the right redzone. */
                if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
                        goto err;

                addr += 2 * PAGE_SIZE;
        }

        /*
         * The pool is live and will never be deallocated from this point on.
         * Remove the pool object from the kmemleak object tree, as it would
         * otherwise overlap with allocations returned by kfence_alloc(), which
         * are registered with kmemleak through the slab post-alloc hook.
         */
        kmemleak_free(__kfence_pool);

        return true;

err:
        /*
         * Only release unprotected pages, and do not try to go back and change
         * page attributes due to risk of failing to do so as well. If changing
         * page attributes for some pages fails, it is very likely that it also
         * fails for the first page, and therefore expect addr==__kfence_pool in
         * most failure cases.
         */
        memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
        __kfence_pool = NULL;
        return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
        int i;

        seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
        for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
                seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
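
/*
 * Reading /sys/kernel/debug/kfence/stats produces output of the form below
 * (values are illustrative only):
 *
 *      enabled: 1
 *      currently allocated: 45
 *      total allocations: 3105
 *      total frees: 3060
 *      zombie allocations: 0
 *      total bugs: 0
 */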

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
        struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);
        kfence_print_object(seq, meta);
        raw_spin_unlock_irqrestore(&meta->lock, flags);
        seq_puts(seq, "---------------------------------\n");

        return 0;
}

static const struct seq_operations object_seqops = {
        .start = start_object,
        .next = next_object,
        .stop = stop_object,
        .show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
        return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
        .open = open_objects,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release, /* Free the seq_file allocated by seq_open(). */
};

static int __init kfence_debugfs_init(void)
{
        struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

        debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
        debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
        return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
        wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
        if (!READ_ONCE(kfence_enabled))
                return;

        atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
        /* Enable static key, and await allocation to happen. */
        static_branch_enable(&kfence_allocation_key);

        if (sysctl_hung_task_timeout_secs) {
                /*
                 * During low activity with no allocations we might wait a
                 * while; let's avoid the hung task warning.
                 */
                wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
                                        sysctl_hung_task_timeout_secs * HZ / 2);
        } else {
                wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
        }

        /* Disable static key and reset timer. */
        static_branch_disable(&kfence_allocation_key);
#endif
        queue_delayed_work(system_unbound_wq, &kfence_timer,
                           msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
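
/*
 * Putting the pieces together, one sampling period roughly proceeds as
 * follows (a sketch of the protocol implemented above and in
 * __kfence_alloc()):
 *
 *      toggle_allocation_gate()        __kfence_alloc()
 *      ------------------------        ----------------
 *      atomic_set(&gate, 0);
 *      static_branch_enable(&key);
 *      wait_event_idle(allocation_wait, ...);
 *                                      atomic_inc_return(&gate) == 1,
 *                                        this allocation is sampled;
 *                                      irq_work -> wake_up(&allocation_wait);
 *      static_branch_disable(&key);
 *      queue_delayed_work(..., sample_interval);
 */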

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
        if (!kfence_sample_interval)
                return;

        __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

        if (!__kfence_pool)
                pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
        /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
        if (!kfence_sample_interval)
                return;

        if (!kfence_init_pool()) {
                pr_err("%s failed\n", __func__);
                return;
        }

        WRITE_ONCE(kfence_enabled, true);
        queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
        pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
                CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
                (void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
        unsigned long flags;
        struct kfence_metadata *meta;
        int i;

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                bool in_use;

                meta = &kfence_metadata[i];

                /*
                 * If we observe some inconsistent cache and state pair where we
                 * should have returned false here, cache destruction is racing
                 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
                 * the lock will not help, as different critical section
                 * serialization will have the same outcome.
                 */
                if (READ_ONCE(meta->cache) != s ||
                    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
                raw_spin_unlock_irqrestore(&meta->lock, flags);

                if (in_use) {
                        /*
                         * This cache still has allocations, and we should not
                         * release them back into the freelist so they can still
                         * safely be used and retain the kernel's default
                         * behaviour of keeping the allocations alive (leak the
                         * cache); however, they effectively become "zombie
                         * allocations" as the KFENCE objects are the only ones
                         * still in use and the owning cache is being destroyed.
                         *
                         * We mark them freed, so that any subsequent use shows
                         * more useful error messages that will include stack
                         * traces of the user of the object, the original
                         * allocation, and caller to shutdown_cache().
                         */
                        kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
                }
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                meta = &kfence_metadata[i];

                /* See above. */
                if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
                        meta->cache = NULL;
                raw_spin_unlock_irqrestore(&meta->lock, flags);
        }
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
        /*
         * Perform size check before switching kfence_allocation_gate, so that
         * we don't disable KFENCE without making an allocation.
         */
        if (size > PAGE_SIZE)
                return NULL;

        /*
         * Skip allocations from non-default zones, including DMA. We cannot
         * guarantee that pages in the KFENCE pool will have the requested
         * properties (e.g. reside in DMAable memory).
         */
        if ((flags & GFP_ZONEMASK) ||
            (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
                return NULL;

        /*
         * allocation_gate only needs to become non-zero, so it doesn't make
         * sense to continue writing to it and pay the associated contention
         * cost, in case we have a large number of concurrent allocations.
         */
        if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
                return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
        /*
         * waitqueue_active() is fully ordered after the update of
         * kfence_allocation_gate per atomic_inc_return().
         */
        if (waitqueue_active(&allocation_wait)) {
                /*
                 * Calling wake_up() here may deadlock when allocations happen
                 * from within timer code. Use an irq_work to defer it.
                 */
                irq_work_queue(&wake_up_kfence_timer_work);
        }
#endif

        if (!READ_ONCE(kfence_enabled))
                return NULL;

        return kfence_guarded_alloc(s, size, flags);
}
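
/*
 * For reference, the fast path gating entry to __kfence_alloc() lives in
 * <linux/kfence.h>; a simplified sketch (not the verbatim header) looks
 * roughly like:
 *
 *      static __always_inline void *kfence_alloc(struct kmem_cache *s,
 *                                                size_t size, gfp_t flags)
 *      {
 *              if (static_branch_unlikely(&kfence_allocation_key))
 *                      return __kfence_alloc(s, size, flags);
 *              return NULL;
 *      }
 *
 * (with an atomic_read() of kfence_allocation_gate replacing the static
 * branch if CONFIG_KFENCE_STATIC_KEYS=n), so the common case costs only a
 * patched-out branch per slab allocation.
 */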

size_t kfence_ksize(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
         * the object, as the object page may be recycled for other-typed
         * objects once it has been freed. meta->cache may be NULL if the cache
         * was destroyed.
         */
        if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
                call_rcu(&meta->rcu_head, rcu_guarded_free);
        else
                kfence_guarded_free(addr, meta, false);
}
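
/*
 * Worked example for the fault classification in kfence_handle_page_fault()
 * below: given the pool layout described at __kfence_pool, object pages have
 * even page_index (2, 4, ...) and guard pages odd page_index (1, 3, ...). A
 * fault at page_index 3 is an out-of-bounds access, attributed to whichever
 * of object 0 (page 2) and object 1 (page 4) is allocated and closer to the
 * faulting address. A fault at page_index 2 means the object page itself is
 * protected, i.e. the object was freed: a use-after-free.
 */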

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
        const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
        struct kfence_metadata *to_report = NULL;
        enum kfence_error_type error_type;
        unsigned long flags;

        if (!is_kfence_address((void *)addr))
                return false;

        if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
                return kfence_unprotect(addr); /* ... unprotect and proceed. */

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

        if (page_index % 2) {
                /* This is a redzone, report a buffer overflow. */
                struct kfence_metadata *meta;
                int distance = 0;

                meta = addr_to_metadata(addr - PAGE_SIZE);
                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
                        to_report = meta;
                        /* Data race ok; distance calculation approximate. */
                        distance = addr - data_race(meta->addr + meta->size);
                }

                meta = addr_to_metadata(addr + PAGE_SIZE);
                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
                        /* Data race ok; distance calculation approximate. */
                        if (!to_report || distance > data_race(meta->addr) - addr)
                                to_report = meta;
                }

                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                to_report->unprotected_page = addr;
                error_type = KFENCE_ERROR_OOB;

                /*
                 * If the object was freed before we took the lock we can still
                 * report this as an OOB -- the report will simply show the
                 * stacktrace of the free as well.
                 */
        } else {
                to_report = addr_to_metadata(addr);
                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                error_type = KFENCE_ERROR_UAF;
                /*
                 * We may race with __kfence_alloc(), and it is possible that a
                 * freed object may be reallocated. We simply report this as a
                 * use-after-free, with the stack trace showing the place where
                 * the object was re-allocated.
                 */
        }

out:
        if (to_report) {
                kfence_report_error(addr, is_write, regs, to_report, error_type);
                raw_spin_unlock_irqrestore(&to_report->lock, flags);
        } else {
                /* This may be a UAF or OOB access, but we can't be sure. */
                kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
        }

        return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}