// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond)) {                                        \
			WRITE_ONCE(kfence_enabled, false);                     \
			disabled_by_warn = true;                               \
		}                                                              \
		__cond;                                                        \
	})

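/*
 * Note: as a GNU statement expression, KFENCE_WARN_ON() evaluates to the
 * warned-on condition itself, so callers below can use it inline, e.g.:
 *
 *	return !KFENCE_WARN_ON(!kfence_protect_page(...));
 */
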
/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);

	*((unsigned long *)kp->arg) = num;

	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return disabled_by_warn ? -EINVAL : kfence_enable_late();

	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);

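/*
 * Example usage (see Documentation/dev-tools/kfence.rst): boot with
 * kfence.sample_interval=100 for a 100ms sample interval, or set it to 0 to
 * disable KFENCE; the value may also be changed at runtime via
 * /sys/module/kfence/parameters/sample_interval.
 */
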
/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];

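/*
 * Back-of-envelope check of the bounds quoted above (our reading, not from
 * the original derivation): with HNUM = 2 and SIZE = 4 * NUM_OBJECTS, and
 * alloc_traces counting individual counter increments (HNUM per covered
 * allocation), 85% unique coverage gives an exponent of
 * -2 * (2 * 0.85 / 4) = -0.85, so P = (1 - e^-0.85)^2 ~= 0.33; likewise 15%
 * gives (1 - e^-0.15)^2 ~= 0.02.
 */
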
/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}

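/*
 * Note: like any Bloom filter, the above can yield false positives (distinct
 * stacks whose ALLOC_COVERED_HNUM counter slots all happen to be non-zero),
 * but no false negatives: a covered allocation only disappears once
 * alloc_covered_add(..., -1) has decremented all of its slots on free.
 */
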
static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

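/*
 * Pool layout sketch (see kfence_init_pool() below): two pages per object, a
 * guard page followed by a data page, with two leading guard pages:
 *
 *	__kfence_pool: [guard][guard][obj 0][guard][obj 1][guard] ...
 *
 * Object i thus starts at byte offset (2 + 2 * i) * PAGE_SIZE, which is the
 * mapping that addr_to_metadata() and metadata_to_pageaddr() below invert.
 */
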
static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, will always
	 * return zero. We still benefit from enabling KFENCE as early as
	 * possible, even when the RNG is not yet available, as this will allow
	 * KFENCE to detect bugs due to earlier allocations. The only downside
	 * is that the out-of-bounds accesses detected are deterministic for
	 * such allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

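	/*
	 * Worked example (illustrative numbers): with PAGE_SIZE == 4096,
	 * size == 100 and cache->align == 64, the right-aligned offset is
	 * 4096 - 100 = 3996, rounded down to 3968; the 28 bytes between
	 * object end and page end become canary bytes, so small out-of-bounds
	 * writes are caught at free time by the canary check, while accesses
	 * past the page still fault immediately.
	 */
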
	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;
#if defined(CONFIG_SLUB)
	slab->objects = 1;
#elif defined(CONFIG_SLAB)
	slab->s_mem = addr;
#endif

	/* Memory initialization. */
	for_each_canary(meta, set_canary_byte);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!arch_kfence_init_pool())
		return addr;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			return addr;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			return addr;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			return addr;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return 0;
}

static bool __init kfence_init_pool_early(void)
{
	unsigned long addr;

	if (!__kfence_pool)
		return false;

	addr = kfence_init_pool();

	if (!addr)
		return true;

	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

static bool kfence_init_pool_late(void)
{
	unsigned long addr, free_size;

	addr = kfence_init_pool();

	if (!addr)
		return true;

	/* Same as above. */
	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
#ifdef CONFIG_CONTIG_ALLOC
	free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE);
#else
	free_pages_exact((void *)addr, free_size);
#endif
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

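/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/kfence/stats    # counters from stats_show()
 *	cat /sys/kernel/debug/kfence/objects  # per-object dumps via show_object()
 */
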
/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);

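/*
 * Example timeline, assuming kfence_sample_interval == 100 (ms): the worker
 * opens the gate (kfence_allocation_gate = 0) and enables the static key;
 * the first eligible slab allocation increments the gate, wakes the worker,
 * and is serviced from the KFENCE pool; the worker then disables the key
 * and re-arms itself ~100ms later.
 */
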
/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

static void kfence_init_enable(void)
{
	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);
	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void __init kfence_init(void)
{
	stack_hash_seed = (u32)random_get_entropy();

	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool_early()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	kfence_init_enable();
}

static int kfence_init_late(void)
{
	const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
#ifdef CONFIG_CONTIG_ALLOC
	struct page *pages;

	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
	if (!pages)
		return -ENOMEM;
	__kfence_pool = page_to_virt(pages);
#else
	if (nr_pages > MAX_ORDER_NR_PAGES) {
		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
		return -EINVAL;
	}
	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
#endif
	if (!__kfence_pool)
		return -ENOMEM;

	if (!kfence_init_pool_late()) {
		pr_err("%s failed\n", __func__);
		return -EBUSY;
	}

	kfence_init_enable();
	return 0;
}

static int kfence_enable_late(void)
{
	if (!__kfence_pool)
		return kfence_init_late();

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	return 0;
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

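/*
 * Note: __kfence_alloc() below is the slow path behind the kfence_alloc()
 * inline wrapper in <linux/kfence.h>, which roughly does (a sketch; see the
 * header for the authoritative definition):
 *
 *	if (!static_branch_unlikely(&kfence_allocation_key))
 *		return NULL;
 *	return __kfence_alloc(s, size, flags);
 *
 * so that the slab hot path pays at most a static-branch check while no
 * sample is due.
 */
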
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}