// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Copyright (c) 2020 Google, Inc.
 */
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/stackdepot.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>

#include "kasan.h"
#include "../slab.h"
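
/*
 * Default number of stack ring entries; used unless overridden via the
 * kasan.stack_ring_size early parameter below.
 */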
#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)

enum kasan_arg_stacktrace {
	KASAN_ARG_STACKTRACE_DEFAULT,
	KASAN_ARG_STACKTRACE_OFF,
	KASAN_ARG_STACKTRACE_ON,
};

static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;

/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

/* Non-zero, as initial pointer values are 0. */
#define STACK_RING_BUSY_PTR ((void *)1)
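
/*
 * Ring buffer that records alloc and free information (PID, stack depot
 * handle, object size) for slab objects tracked by tag-based KASAN.
 */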
struct kasan_stack_ring stack_ring = {
	.lock = __RW_LOCK_UNLOCKED(stack_ring.lock)
};

/* kasan.stacktrace=off/on */
static int __init early_kasan_flag_stacktrace(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "off"))
		kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);

/* kasan.stack_ring_size=<number of entries> */
static int __init early_kasan_flag_stack_ring_size(char *arg)
{
	if (!arg)
		return -EINVAL;

	return kstrtoul(arg, 0, &stack_ring.size);
}
early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);

void __init kasan_init_tags(void)
{
	switch (kasan_arg_stacktrace) {
	case KASAN_ARG_STACKTRACE_DEFAULT:
		/* Default is specified by kasan_flag_stacktrace definition. */
		break;
	case KASAN_ARG_STACKTRACE_OFF:
		static_branch_disable(&kasan_flag_stacktrace);
		break;
	case KASAN_ARG_STACKTRACE_ON:
		static_branch_enable(&kasan_flag_stacktrace);
		break;
	}
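
	/*
	 * If stack trace collection is enabled, allocate the stack ring
	 * buffer from memblock, honoring a kasan.stack_ring_size override.
	 */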
	if (kasan_stack_collection_enabled()) {
		if (!stack_ring.size)
			stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
		stack_ring.entries = memblock_alloc(
			sizeof(stack_ring.entries[0]) * stack_ring.size,
			SMP_CACHE_BYTES);
		if (WARN_ON(!stack_ring.entries))
			static_branch_disable(&kasan_flag_stacktrace);
	}
}
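
/*
 * Record an alloc or free stack trace for a slab object in the stack ring.
 */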
static void save_stack_info(struct kmem_cache *cache, void *object,
				gfp_t gfp_flags, bool is_free)
{
	unsigned long flags;
	depot_stack_handle_t stack, old_stack;
	u64 pos;
	struct kasan_stack_ring_entry *entry;
	void *old_ptr;
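
	/*
	 * Save the stack trace to the stack depot. STACK_DEPOT_FLAG_GET takes
	 * a reference on the depot record so it stays valid until the
	 * matching stack_depot_put() when this ring entry is overwritten.
	 */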
	stack = kasan_save_stack(gfp_flags,
			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);

	/*
	 * Prevent save_stack_info() from modifying stack ring
	 * when kasan_complete_mode_report_info() is walking it.
	 */
	read_lock_irqsave(&stack_ring.lock, flags);
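
	/*
	 * Claim the next slot; the ring wraps around, so the oldest entries
	 * get overwritten as new allocations and frees are recorded.
	 */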
next:
	pos = atomic64_fetch_add(1, &stack_ring.pos);
	entry = &stack_ring.entries[pos % stack_ring.size];

	/* Detect stack ring entry slots that are being written to. */
	old_ptr = READ_ONCE(entry->ptr);
	if (old_ptr == STACK_RING_BUSY_PTR)
		goto next; /* Busy slot. */
	if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
		goto next; /* Busy slot. */
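
	/*
	 * The slot is now marked busy. Remember the evicted entry's stack
	 * handle so its stack depot reference can be dropped below.
	 */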
	old_stack = entry->stack;

	entry->size = cache->object_size;
	entry->pid = current->pid;
	entry->stack = stack;
	entry->is_free = is_free;
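
	/* Write the object pointer back: this also unmarks the slot as busy. */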
	entry->ptr = object;

	read_unlock_irqrestore(&stack_ring.lock, flags);

	if (old_stack)
		stack_depot_put(old_stack);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	save_stack_info(cache, object, flags, false);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	save_stack_info(cache, object, 0, true);
}