mm/kfence/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond)) {                                        \
			WRITE_ONCE(kfence_enabled, false);                     \
			disabled_by_warn = true;                               \
		}                                                              \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);

	*((unsigned long *)kp->arg) = num;

	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return disabled_by_warn ? -EINVAL : kfence_enable_late();
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
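
/*
 * Example usage (see also Documentation/dev-tools/kfence.rst): the sample
 * interval is set at boot via the "kfence.sample_interval=<ms>" parameter,
 * or at runtime, e.g.:
 *
 *	echo 500 > /sys/module/kfence/parameters/sample_interval
 *
 * Writing 0 disables KFENCE.
 */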

/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
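
/*
 * The pool layout set up by kfence_init_pool() below: two leading guard
 * pages, then one object page followed by one guard page per object, i.e.
 * object i is backed by the page at __kfence_pool + (i + 1) * 2 * PAGE_SIZE:
 *
 *	+-------+-------+-------+-------+-------+-------+--
 *	| guard | guard | obj 0 | guard | obj 1 | guard | ...
 *	+-------+-------+-------+-------+-------+-------+--
 *
 * addr_to_metadata() and metadata_to_pageaddr() implement the two directions
 * of this mapping.
 */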

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
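/*
 * Worked example, assuming the default CONFIG_KFENCE_NUM_OBJECTS=255:
 * ALLOC_COVERED_ORDER = ilog2(255) + 2 = 9, i.e. SIZE = 512. With 85% unique
 * allocations (alloc_traces ~= 217), P = (1 - e^(-2 * 217 / 512))^2 ~= 0.33;
 * with 15% (alloc_traces ~= 38), P ~= 0.02 -- matching the range above.
 */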
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from the Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in the Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}
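
/*
 * A counting filter (rather than a plain bitmask) is required so entries can
 * be removed again: kfence_guarded_free() calls alloc_covered_add(hash, -1)
 * once an object is freed and its source is no longer "covered". A false
 * positive merely skips a KFENCE allocation; correctness is never affected.
 */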

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}
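
/*
 * Example, per the pool layout above: an address in pool page 2 yields index
 * 2/2 - 1 = 0 (object 0); an address in the guard page at pool page 3 also
 * yields index 0, i.e. a guard page maps to the object on its left.
 */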

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}
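
/*
 * Note: KFENCE_CANARY_PATTERN (kfence.h) derives the canary byte from the low
 * bits of the address (e.g. 0xaa ^ (addr & 0x7)), so a canary byte copied to
 * a different offset no longer matches and is detected as corruption.
 */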

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization,
	 * prandom_u32_max() will always return zero. We still benefit from
	 * enabling KFENCE as early as possible, even when the RNG is not yet
	 * available, as this will allow KFENCE to detect bugs due to earlier
	 * allocations. The only downside is that the out-of-bounds accesses
	 * detected are deterministic for such allocations.
	 */
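	/*
	 * Randomize left/right placement: a right-aligned object trips the
	 * following guard page immediately on an out-of-bounds access past the
	 * end of the object, while a left-aligned object trips the preceding
	 * guard page on an underflow; the canary bytes cover whichever side is
	 * not page-guarded.
	 */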
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;
#if defined(CONFIG_SLUB)
	slab->objects = 1;
#elif defined(CONFIG_SLAB)
	slab->s_mem = addr;
#endif

	/* Memory initialization. */
	for_each_canary(meta, set_canary_byte);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!arch_kfence_init_pool())
		return addr;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(&pages[i]);

		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			return addr;

		__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
				   MEMCG_DATA_OBJCGS;
#endif
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			return addr;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			return addr;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return 0;
}

static bool __init kfence_init_pool_early(void)
{
	unsigned long addr;

	if (!__kfence_pool)
		return false;

	addr = kfence_init_pool();

	if (!addr)
		return true;

	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

static bool kfence_init_pool_late(void)
{
	unsigned long addr, free_size;

	addr = kfence_init_pool();

	if (!addr)
		return true;

	/* Same as above. */
	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
#ifdef CONFIG_CONTIG_ALLOC
	free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE);
#else
	free_pages_exact((void *)addr, free_size);
#endif
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);
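
/*
 * Example: with debugfs mounted, the statistics and object state exposed
 * above can be read via:
 *
 *	cat /sys/kernel/debug/kfence/stats
 *	cat /sys/kernel/debug/kfence/objects
 */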

/* === Allocation Gate Timer ================================================ */

static struct delayed_work kfence_timer;

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
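
/*
 * Net effect of the gate: at most one KFENCE allocation is admitted per
 * sample interval, so with the default sample_interval=100 (ms) no more than
 * ~10 allocations per second are redirected into the pool, which is what
 * keeps the overhead of the sampling approach negligible.
 */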

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

static void kfence_init_enable(void)
{
	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);

	if (kfence_deferrable)
		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
	else
		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);

	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void __init kfence_init(void)
{
	stack_hash_seed = (u32)random_get_entropy();

	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool_early()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	kfence_init_enable();
}

static int kfence_init_late(void)
{
	const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
#ifdef CONFIG_CONTIG_ALLOC
	struct page *pages;

	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
	if (!pages)
		return -ENOMEM;
	__kfence_pool = page_to_virt(pages);
#else
	if (nr_pages > MAX_ORDER_NR_PAGES) {
		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
		return -EINVAL;
	}
	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
	if (!__kfence_pool)
		return -ENOMEM;
#endif

	if (!kfence_init_pool_late()) {
		pr_err("%s failed\n", __func__);
		return -EBUSY;
	}

	kfence_init_enable();
	return 0;
}

static int kfence_enable_late(void)
{
	if (!__kfence_pool)
		return kfence_init_late();

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	return 0;
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and the caller of shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

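/*
 * Note: this is the slow-path of kfence_alloc(); the inline fast-path in
 * include/linux/kfence.h first checks the static key and the allocation gate,
 * so the vast majority of allocations never reach this function.
 */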
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
	KFENCE_WARN_ON(meta->objcg);
#endif
	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

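	/*
	 * Per the pool layout (see __kfence_pool above), guard pages sit at
	 * odd page indices: a fault there is an out-of-bounds access on a
	 * neighbouring object, while a fault on an (even) object page is a
	 * use-after-free.
	 */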
	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}