// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
        ({                                                                     \
                const bool __cond = WARN_ON(cond);                             \
                if (unlikely(__cond))                                          \
                        WRITE_ONCE(kfence_enabled, false);                     \
                __cond;                                                        \
        })

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
        unsigned long num;
        int ret = kstrtoul(val, 0, &num);

        if (ret < 0)
                return ret;

        if (!num) /* Using 0 to indicate KFENCE is disabled. */
                WRITE_ONCE(kfence_enabled, false);
        else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
                return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

        *((unsigned long *)kp->arg) = num;
        return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
        if (!READ_ONCE(kfence_enabled))
                return sprintf(buffer, "0\n");

        return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
        .set = param_set_sample_interval,
        .get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
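
/*
 * Usage sketch (assumes standard module-parameter handling, not stated in the
 * original sources): with the "kfence." prefix above, the sample interval can
 * typically be set at boot via "kfence.sample_interval=<ms>" on the kernel
 * command line, or inspected at runtime via
 * /sys/module/kfence/parameters/sample_interval, subject to the checks in
 * param_set_sample_interval().
 */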

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
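
/*
 * Pool layout sketch (derived from kfence_init_pool() and addr_to_metadata()
 * below; those functions are authoritative):
 *
 *	+-------+-------+----------+-------+----------+-------+---
 *	| guard | guard | object 0 | guard | object 1 | guard | ...
 *	| page  | page  | page     | page  | page     | page  |
 *	+-------+-------+----------+-------+----------+-------+---
 *
 * Two guard pages lead the pool and every object page is followed by its own
 * guard page, so object i's data page starts at
 * __kfence_pool + (i + 1) * 2 * PAGE_SIZE.
 */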

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
#endif

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
        KFENCE_COUNTER_ALLOCATED,
        KFENCE_COUNTER_ALLOCS,
        KFENCE_COUNTER_FREES,
        KFENCE_COUNTER_ZOMBIES,
        KFENCE_COUNTER_BUGS,
        KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
        [KFENCE_COUNTER_ALLOCATED]      = "currently allocated",
        [KFENCE_COUNTER_ALLOCS]         = "total allocations",
        [KFENCE_COUNTER_FREES]          = "total frees",
        [KFENCE_COUNTER_ZOMBIES]        = "zombie allocations",
        [KFENCE_COUNTER_BUGS]           = "total bugs",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static bool kfence_protect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
        long index;

        /* The checks do not affect performance; only called from slow-paths. */

        if (!is_kfence_address((void *)addr))
                return NULL;

        /*
         * May be an invalid index if called with an address at the edge of
         * __kfence_pool, in which case we would report an "invalid access"
         * error.
         */
        index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
        if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
                return NULL;

        return &kfence_metadata[index];
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
        unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

        /* The checks do not affect performance; only called from slow-paths. */

        /* Only call with a pointer into kfence_metadata. */
        if (KFENCE_WARN_ON(meta < kfence_metadata ||
                           meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
                return 0;

        /*
         * This metadata object only ever maps to 1 page; verify that the stored
         * address is in the expected range.
         */
        if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
                return 0;

        return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void metadata_update_state(struct kfence_metadata *meta,
                                           enum kfence_object_state next)
{
        struct kfence_track *track =
                next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

        lockdep_assert_held(&meta->lock);

        /*
         * Skip over 1 (this) function; noinline ensures we do not accidentally
         * skip over the caller by never inlining.
         */
        track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
        track->pid = task_pid_nr(current);

        /*
         * Pairs with READ_ONCE() in
         *      kfence_shutdown_cache(),
         *      kfence_handle_page_fault().
         */
        WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
        *addr = KFENCE_CANARY_PATTERN(addr);
        return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
        if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
                return true;

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
        kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
                            KFENCE_ERROR_CORRUPTION);
        return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
        const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
        unsigned long addr;

        lockdep_assert_held(&meta->lock);

        /*
         * We'll iterate over each canary byte per-side until fn() returns
         * false. However, we'll still iterate over the canary bytes to the
         * right of the object even if there was an error in the canary bytes to
         * the left of the object. Specifically, if check_canary_byte()
         * generates an error, showing both sides might give more clues as to
         * what the error is about when displaying which bytes were corrupted.
         */

        /* Apply to left of object. */
        for (addr = pageaddr; addr < meta->addr; addr++) {
                if (!fn((u8 *)addr))
                        break;
        }

        /* Apply to right of object. */
        for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
                if (!fn((u8 *)addr))
                        break;
        }
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
        struct kfence_metadata *meta = NULL;
        unsigned long flags;
        struct page *page;
        void *addr;

        /* Try to obtain a free object. */
        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
        if (!list_empty(&kfence_freelist)) {
                meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
                list_del_init(&meta->list);
        }
        raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
        if (!meta)
                return NULL;

        if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
                /*
                 * This is extremely unlikely -- we are reporting on a
                 * use-after-free, which locked meta->lock, and the reporting
                 * code via printk calls kmalloc() which ends up in
                 * kfence_alloc() and tries to grab the same object that we're
                 * reporting on. While it has never been observed, lockdep does
                 * report that there is a possibility of deadlock. Fix it by
                 * using trylock and bailing out gracefully.
                 */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                /* Put the object back on the freelist. */
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                return NULL;
        }

        meta->addr = metadata_to_pageaddr(meta);
        /* Unprotect if we're reusing this page. */
        if (meta->state == KFENCE_OBJECT_FREED)
                kfence_unprotect(meta->addr);

        /*
         * Note: for allocations made before RNG initialization,
         * prandom_u32_max() will always return zero. We still benefit from
         * enabling KFENCE as early as possible, even when the RNG is not yet
         * available, as this will allow KFENCE to detect bugs due to earlier
         * allocations. The only downside is that the out-of-bounds accesses
         * detected are deterministic for such allocations.
         */
        if (prandom_u32_max(2)) {
                /* Allocate on the "right" side, re-calculate address. */
                meta->addr += PAGE_SIZE - size;
                meta->addr = ALIGN_DOWN(meta->addr, cache->align);
        }

        addr = (void *)meta->addr;

        /* Update remaining metadata. */
        metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
        /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
        WRITE_ONCE(meta->cache, cache);
        meta->size = size;
        for_each_canary(meta, set_canary_byte);

        /* Set required struct page fields. */
        page = virt_to_page(meta->addr);
        page->slab_cache = cache;
        if (IS_ENABLED(CONFIG_SLUB))
                page->objects = 1;
        if (IS_ENABLED(CONFIG_SLAB))
                page->s_mem = addr;

        raw_spin_unlock_irqrestore(&meta->lock, flags);

        /* Memory initialization. */

        /*
         * We check slab_want_init_on_alloc() ourselves, rather than letting
         * SL*B do the initialization, as otherwise we might overwrite KFENCE's
         * redzone.
         */
        if (unlikely(slab_want_init_on_alloc(gfp, cache)))
                memzero_explicit(addr, size);
        if (cache->ctor)
                cache->ctor(addr);

        if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
                kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

        return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
        struct kcsan_scoped_access assert_page_exclusive;
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);

        if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
                /* Invalid or double-free, bail out. */
                atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
                kfence_report_error((unsigned long)addr, false, NULL, meta,
                                    KFENCE_ERROR_INVALID_FREE);
                raw_spin_unlock_irqrestore(&meta->lock, flags);
                return;
        }

        /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
        kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
                                  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
                                  &assert_page_exclusive);

        if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
                kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

        /* Restore page protection if there was an OOB access. */
        if (meta->unprotected_page) {
                kfence_protect(meta->unprotected_page);
                meta->unprotected_page = 0;
        }

        /* Check canary bytes for memory corruption. */
        for_each_canary(meta, check_canary_byte);

        /*
         * Clear memory if init-on-free is set. While we protect the page, the
         * data is still there, and after a use-after-free is detected, we
         * unprotect the page, so the data is still accessible.
         */
        if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
                memzero_explicit(addr, meta->size);

        /* Mark the object as freed. */
        metadata_update_state(meta, KFENCE_OBJECT_FREED);

        raw_spin_unlock_irqrestore(&meta->lock, flags);

        /* Protect to detect use-after-frees. */
        kfence_protect((unsigned long)addr);

        kcsan_end_scoped_access(&assert_page_exclusive);
        if (!zombie) {
                /* Add it to the tail of the freelist for reuse. */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                KFENCE_WARN_ON(!list_empty(&meta->list));
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
                atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
        } else {
                /* See kfence_shutdown_cache(). */
                atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
        }
}

static void rcu_guarded_free(struct rcu_head *h)
{
        struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

        kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
        unsigned long addr = (unsigned long)__kfence_pool;
        struct page *pages;
        int i;

        if (!__kfence_pool)
                return false;

        if (!arch_kfence_init_pool())
                goto err;

        pages = virt_to_page(addr);

        /*
         * Set up object pages: they must have PG_slab set, to avoid freeing
         * these as real pages.
         *
         * We also want to avoid inserting kfence_free() in the kfree()
         * fast-path in SLUB, and therefore need to ensure kfree() correctly
         * enters __slab_free() slow-path.
         */
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                if (!i || (i % 2))
                        continue;

                /* Verify we do not have a compound head page. */
                if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
                        goto err;

                __SetPageSlab(&pages[i]);
        }

        /*
         * Protect the first 2 pages. The first page is mostly unnecessary, and
         * merely serves as an extended guard page. However, adding one
         * additional page in the beginning gives us an even number of pages,
         * which simplifies the mapping of address to metadata index.
         */
        for (i = 0; i < 2; i++) {
                if (unlikely(!kfence_protect(addr)))
                        goto err;

                addr += PAGE_SIZE;
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                struct kfence_metadata *meta = &kfence_metadata[i];

                /* Initialize metadata. */
                INIT_LIST_HEAD(&meta->list);
                raw_spin_lock_init(&meta->lock);
                meta->state = KFENCE_OBJECT_UNUSED;
                meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
                list_add_tail(&meta->list, &kfence_freelist);

                /* Protect the right redzone. */
                if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
                        goto err;

                addr += 2 * PAGE_SIZE;
        }

        /*
         * The pool is live and will never be deallocated from this point on.
         * Remove the pool object from the kmemleak object tree, as it would
         * otherwise overlap with allocations returned by kfence_alloc(), which
         * are registered with kmemleak through the slab post-alloc hook.
         */
        kmemleak_free(__kfence_pool);

        return true;

err:
        /*
         * Only release unprotected pages, and do not try to go back and change
         * page attributes due to risk of failing to do so as well. If changing
         * page attributes for some pages fails, it is very likely that it also
         * fails for the first page, and therefore expect addr==__kfence_pool in
         * most failure cases.
         */
        memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
        __kfence_pool = NULL;
        return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
        int i;

        seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
        for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
                seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
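
/*
 * Illustrative /sys/kernel/debug/kfence/stats output, following the format
 * strings in stats_show() above (counter values are made up):
 *
 *	enabled: 1
 *	currently allocated: 45
 *	total allocations: 5120
 *	total frees: 5075
 *	zombie allocations: 0
 *	total bugs: 1
 */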

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
        struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);
        kfence_print_object(seq, meta);
        raw_spin_unlock_irqrestore(&meta->lock, flags);
        seq_puts(seq, "---------------------------------\n");

        return 0;
}

static const struct seq_operations object_seqops = {
        .start = start_object,
        .next = next_object,
        .stop = stop_object,
        .show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
        return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
        .open = open_objects,
        .read = seq_read,
        .llseek = seq_lseek,
};

static int __init kfence_debugfs_init(void)
{
        struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

        debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
        debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
        return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up being a problem in the
 * future (with more aggressive sampling intervals), we could get away with a
 * variant that avoids IPIs, at the cost of not immediately capturing
 * allocations if the instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
        if (!READ_ONCE(kfence_enabled))
                return;

        /* Enable static key, and await allocation to happen. */
        atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
        static_branch_enable(&kfence_allocation_key);
        /*
         * Await an allocation. Timeout after 1 second, in case the kernel stops
         * doing allocations, to avoid stalling this worker task for too long.
         */
        {
                unsigned long end_wait = jiffies + HZ;

                do {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&kfence_allocation_gate) != 0)
                                break;
                        schedule_timeout(1);
                } while (time_before(jiffies, end_wait));
                __set_current_state(TASK_RUNNING);
        }
        /* Disable static key and reset timer. */
        static_branch_disable(&kfence_allocation_key);
#endif
        schedule_delayed_work(&kfence_timer, msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
        if (!kfence_sample_interval)
                return;

        __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

        if (!__kfence_pool)
                pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
        /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
        if (!kfence_sample_interval)
                return;

        if (!kfence_init_pool()) {
                pr_err("%s failed\n", __func__);
                return;
        }

        WRITE_ONCE(kfence_enabled, true);
        schedule_delayed_work(&kfence_timer, 0);
        pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
                CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
                (void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
        unsigned long flags;
        struct kfence_metadata *meta;
        int i;

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                bool in_use;

                meta = &kfence_metadata[i];

                /*
                 * If we observe some inconsistent cache and state pair where we
                 * should have returned false here, cache destruction is racing
                 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
                 * the lock will not help, as different critical section
                 * serialization will have the same outcome.
                 */
                if (READ_ONCE(meta->cache) != s ||
                    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
                raw_spin_unlock_irqrestore(&meta->lock, flags);

                if (in_use) {
                        /*
                         * This cache still has allocations, and we should not
                         * release them back into the freelist, so that they can
                         * still safely be used; this retains the kernel's
                         * default behaviour of keeping the allocations alive
                         * (the cache is leaked). However, they effectively
                         * become "zombie allocations", as the KFENCE objects
                         * are the only ones still in use and the owning cache
                         * is being destroyed.
                         *
                         * We mark them freed, so that any subsequent use shows
                         * more useful error messages that will include stack
                         * traces of the user of the object, the original
                         * allocation, and caller to shutdown_cache().
                         */
                        kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
                }
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                meta = &kfence_metadata[i];

                /* See above. */
                if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
                        meta->cache = NULL;
                raw_spin_unlock_irqrestore(&meta->lock, flags);
        }
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
        /*
         * allocation_gate only needs to become non-zero, so it doesn't make
         * sense to continue writing to it and pay the associated contention
         * cost, in case we have a large number of concurrent allocations.
         */
        if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
                return NULL;

        if (!READ_ONCE(kfence_enabled))
                return NULL;

        if (size > PAGE_SIZE)
                return NULL;

        return kfence_guarded_alloc(s, size, flags);
}

size_t kfence_ksize(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
         * the object, as the object page may be recycled for other-typed
         * objects once it has been freed. meta->cache may be NULL if the cache
         * was destroyed.
         */
        if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
                call_rcu(&meta->rcu_head, rcu_guarded_free);
        else
                kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
        const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
        struct kfence_metadata *to_report = NULL;
        enum kfence_error_type error_type;
        unsigned long flags;

        if (!is_kfence_address((void *)addr))
                return false;

        if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
                return kfence_unprotect(addr); /* ... unprotect and proceed. */

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

        if (page_index % 2) {
                /* This is a redzone, report a buffer overflow. */
                struct kfence_metadata *meta;
                int distance = 0;

                meta = addr_to_metadata(addr - PAGE_SIZE);
                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
                        to_report = meta;
                        /* Data race ok; distance calculation approximate. */
                        distance = addr - data_race(meta->addr + meta->size);
                }

                meta = addr_to_metadata(addr + PAGE_SIZE);
                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
                        /* Data race ok; distance calculation approximate. */
                        if (!to_report || distance > data_race(meta->addr) - addr)
                                to_report = meta;
                }

                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                to_report->unprotected_page = addr;
                error_type = KFENCE_ERROR_OOB;

                /*
                 * If the object was freed before we took the lock, we can still
                 * report this as an OOB -- the report will simply show the
                 * stacktrace of the free as well.
                 */
        } else {
                to_report = addr_to_metadata(addr);
                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                error_type = KFENCE_ERROR_UAF;
                /*
                 * We may race with __kfence_alloc(), and it is possible that a
                 * freed object may be reallocated. We simply report this as a
                 * use-after-free, with the stack trace showing the place where
                 * the object was re-allocated.
                 */
        }

out:
        if (to_report) {
                kfence_report_error(addr, is_write, regs, to_report, error_type);
                raw_spin_unlock_irqrestore(&to_report->lock, flags);
        } else {
                /* This may be a UAF or OOB access, but we can't be sure. */
                kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
        }

        return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}