// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

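/*
 * Worked example (illustrative, not part of the original file): under a
 * tag-based mode, kmalloc(123) is rounded up to whole KASAN_GRANULE_SIZE
 * (16-byte) granules, so bytes 123..127 still carry the object's tag and a
 * write to ptr[123] would go unnoticed. OOB_TAG_OFF pushes the access past
 * round_up(123, 16) == 128 into the next granule, whose tag is guaranteed
 * to differ:
 *
 *	ptr[123 + OOB_TAG_OFF] = 'x';	// tag-based: lands in the next granule
 */
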
/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, this also allows tag checking to be reenabled for
 * each test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	fail_data.report_found = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_data", &fail_data);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}

/*
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
 * false. This allows detecting KASAN reports that happen outside of the checks
 * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
 * and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (!READ_ONCE(fail_data.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				"\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging_sync();			\
		migrate_enable();					\
	}								\
	WRITE_ONCE(fail_data.report_found, false);			\
} while (0)

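/*
 * Minimal usage sketch (illustrative, mirroring the tests below): wrap each
 * access that is expected to be caught; the macro consumes the report and
 * fails the test if none was produced:
 *
 *	char *ptr = kmalloc(64, GFP_KERNEL);
 *
 *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[64] = 'x');
 *	kfree(ptr);
 */
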
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
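/*
 * Illustrative sizing (the exact threshold is config-dependent): with SLUB,
 * kmalloc(KMALLOC_MAX_CACHE_SIZE + 10) is too big for any kmalloc cache and
 * is backed by plain pages instead, so these objects carry page-allocator
 * KASAN metadata rather than slab redzones.
 */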
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules, and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

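/*
 * Worked example for the granule checks above (illustrative, using the
 * 235 -> 201 case from krealloc_less_oob() below with KASAN_GRANULE_SIZE
 * == 16): middle = 201 + (235 - 201) / 2 = 218, round_up(201, 16) == 208 ==
 * round_down(218, 16), and round_up(218, 16) == 224 == round_down(235, 16),
 * so size2, middle, and size1 each land in their own granule, exactly as
 * the KUNIT_EXPECT_LE checks require.
 */
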
static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

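/*
 * Note (illustrative): invalid_size is -2 converted to size_t, i.e.
 * SIZE_MAX - 1, so the instrumented memmove() sees an enormous length and
 * its range check fails immediately; the volatile qualifier keeps the
 * compiler from constant-folding the bogus length away at build time.
 */
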
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

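/*
 * Why a handful of retries suffices (rough, illustrative numbers): a fresh
 * allocation collides with ptr1 only if it reuses both the same slot and
 * the same randomly assigned tag, roughly a one-in-the-number-of-available-
 * tags event per attempt, so 16 retries make it overwhelmingly likely that
 * ptr1 and ptr2 end up distinguishable.
 */
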
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

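/*
 * The qualifier placement spelled out (illustrative):
 *
 *	volatile char *a = global_array;  // the data is volatile; the
 *					  // compiler may still deduce that
 *					  // 'a' points into global_array
 *	char *volatile b = global_array;  // the pointer is volatile; it must
 *					  // be reloaded on each use, so no
 *					  // array bounds can be inferred
 */
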
/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}

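/*
 * Concrete numbers (illustrative, config-dependent): a 123-byte kmalloc()
 * is typically served from the kmalloc-128 cache, so ksize() returns 128.
 * After the ksize() call the whole 128-byte object is unpoisoned: ptr[123]
 * is fine, while ptr[128] is the first byte past the object and must still
 * produce a report.
 */
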
/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));

	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to a zeroed byte.
	 * Skip the metadata that could be stored in a freed object so ptr
	 * will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

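/*
 * The arithmetic behind the offsets above (illustrative, 64-bit long): the
 * allocation is sizeof(long) + 1 == 9 bytes, so only byte 8 of the second
 * long exists. Bit BITS_PER_LONG is bit 0 of that second long, yet the
 * bitop reads and writes the whole word at bytes 8..15, of which bytes
 * 9..15 are out of bounds; bit BITS_PER_LONG + BITS_PER_BYTE lives in byte
 * 9, which is past the allocation entirely.
 */
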
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

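/*
 * Layout sketch (illustrative, assuming 4K pages): vmalloc(3000) maps one
 * data page followed by an unmapped guard page. Offsets 3000..4095 are
 * KASAN-poisoned but still mapped, so index 3100 produces a clean report;
 * anything past 4095 would hit the guard page and fault in the MMU instead.
 */
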
/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");