// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

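/*
 * Note (editorial summary, not from the original file): with tag-based KASAN
 * modes, object sizes are effectively rounded up to KASAN_GRANULE_SIZE, so an
 * out-of-bounds access that lands in the same granule as the object's end is
 * not detectable. Tests below therefore shift such accesses by OOB_TAG_OFF:
 * one granule for the tag-based modes, 0 for the byte-granular generic mode.
 */
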
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

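/*
 * fail_data is shared with the KASAN reporting code through the KUnit
 * resource named "kasan_data": the report path sets report_found when a
 * report is printed, which is what KUNIT_EXPECT_KASAN_FAIL() checks below.
 */
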
static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN also allow tag checking to be reenabled for each
 * test, see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	fail_data.report_found = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_data", &fail_data);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
 * false. This allows detecting KASAN reports that happen outside of the checks
 * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
 * and in kasan_test_exit.
 */

#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (!READ_ONCE(fail_data.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging_sync();			\
		migrate_enable();					\
	}								\
	WRITE_ONCE(fail_data.report_found, false);			\
} while (0)

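/*
 * A minimal usage sketch (illustrative only, not a registered test case):
 * every test below follows this same allocate/expect-report shape.
 *
 *	char *ptr = kmalloc(16, GFP_KERNEL);
 *
 *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[16]);
 *	kfree(ptr);
 */
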
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules, and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

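/*
 * The four wrappers below drive the two helpers above: once with sizes that
 * stay within a slab cache, and once with sizes past KMALLOC_MAX_CACHE_SIZE,
 * so that krealloc() goes through the SLUB page allocator fallback.
 */
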
static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only check
 * the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

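/*
 * The two tests below check that a legitimate kfree() still works when the
 * pointer is recomputed through struct page or through a physical address;
 * neither is expected to produce a KASAN report.
 */
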
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));

	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to a zeroed byte.
	 * Skip metadata that could be stored in a freed object so ptr
	 * will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

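/*
 * The two helpers below exercise every bitop on an out-of-bounds address;
 * each call is expected to produce a KASAN report, since bitops operate on
 * the whole long that contains the requested bit.
 */
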
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");