// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

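/*
 * Capture the current call stack and store it in the stack depot, returning
 * a handle that alloc/free tracks can keep instead of a full stack trace.
 */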
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}

void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u32 cpu = raw_smp_processor_id();
	u64 ts_nsec = local_clock();

	track->cpu = cpu;
	/* Store a compressed timestamp (8 ns granularity). */
	track->timestamp = ts_nsec >> 3;
#endif /* CONFIG_KASAN_EXTRA_INFO */
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags,
			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
}

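/*
 * kasan_depth is a per-task counter consulted by the reporting code:
 * kasan_disable_current() suppresses KASAN reports for the current task
 * (e.g. while slab debugging code dumps poisoned metadata), and
 * kasan_enable_current() re-enables them.
 */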
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

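/*
 * Returns true if the page range was unpoisoned and tagged; false if
 * unpoisoning was skipped (highmem pages or allocations excluded by
 * page_alloc sampling), in which case the page tags are left untouched.
 */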
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on objects' indexes, so that objects that are next
 *    to each other get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

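/*
 * Illustrative example for the SLAB branch above: objects at freelist
 * indexes 0, 1, 2, ... receive tags 0x00, 0x01, 0x02, ..., so objects that
 * sit next to each other in a slab always end up with different tags (the
 * values simply follow from the obj_to_index() cast).
 */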
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

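/*
 * Returns true if the slab allocator must not free the object right away:
 * either an invalid or double free was detected and reported, or the object
 * was placed into the KASAN quarantine. Returns false if the free may
 * proceed normally.
 */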
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine, bool init)
{
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
		return false;

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);
	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

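/*
 * Sanity-check a page_alloc-backed pointer before it is freed or poisoned:
 * reports an invalid free if the pointer does not point to the start of the
 * allocation and a double free if the memory is already poisoned. Returns
 * true if a bug was detected and reported, false otherwise.
 */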
static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
	if (!kasan_arch_is_ready())
		return false;
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}
	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	check_page_allocation(ptr, ip);

	/* The object will be poisoned by kasan_poison_pages(). */
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, tagged_object, flags);

	return tagged_object;
}

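/*
 * Redzone layout set up below for a kmalloc()'ed object of 'size' bytes in a
 * cache with 'object_size' bytes per object (sketch, generic mode):
 *
 *	object ... object+size       : accessible
 *	object+size ... object_size  : poisoned with KASAN_SLAB_REDZONE
 *
 * The unaligned tail of the last accessible granule is handled separately by
 * kasan_poison_last_granule().
 */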
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_unpoison_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

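/*
 * Returns true if the pages were poisoned (or poisoning was skipped, e.g.
 * for highmem pages or allocations excluded by sampling) and the allocation
 * can be safely kept in the mempool; returns false if a bug was detected
 * and reported for this allocation.
 */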
bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip)
{
	void *ptr;

	if (unlikely(PageHighMem(page)))
		return true;

	/* Bail out if allocation was excluded due to sampling. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
		return true;

	ptr = page_address(page);
	if (check_page_allocation(ptr, ip))
		return false;

	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip)
{
	__kasan_unpoison_pages(page, order, false);
}

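/*
 * Poison a slab- or page_alloc-backed mempool element that is being returned
 * to the pool. Returns true if the element was poisoned and may be reused;
 * false if a bug was detected and reported.
 */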
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	} else {
		struct slab *slab = folio_slab(folio);

		return !____kasan_slab_free(slab->slab_cache, ptr, ip,
						false, false);
	}
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
	kasan_unpoison(ptr, size, false);
}

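/*
 * Check whether the byte at @address is accessible; if it is not, report the
 * bad access at the caller's @ip and return false.
 */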
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report(address, 1, false, ip);
		return false;
	}
	return true;
}