// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
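
/*
 * Worked value, assuming arm64 defaults: generic KASAN tracks object sizes
 * byte-precisely, so OOB_TAG_OFF is 0 there, while the tag-based modes tag
 * memory in KASAN_GRANULE_SIZE (16-byte) granules, so out-of-bounds offsets
 * must be pushed a full granule further to land in memory whose tag differs
 * from the pointer's.
 */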

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN also allow tag checking to be reenabled for each
 * test, see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	kasan_set_tagging_report_once(false);
	fail_data.report_found = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_data", &fail_data);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !fail_data.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (!READ_ONCE(fail_data.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				"\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging_sync();			\
		migrate_enable();					\
	}								\
	WRITE_ONCE(fail_data.report_found, false);			\
} while (0)
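
/*
 * Typical usage (a hypothetical access, purely for illustration): the checked
 * expression runs between the barriers above, and the KASAN report handler
 * sets fail_data.report_found:
 *
 *	ptr = kmalloc(32, GFP_KERNEL);
 *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[32] = 'x');
 *	kfree(ptr);
 */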

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
				ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}
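
/*
 * Worked example for the offsets above, assuming KASAN_GRANULE_SIZE == 16:
 * size = 128 - 16 - 5 = 107, served from the 128-byte kmalloc cache.
 * ptr[107] is an unaligned access one byte past the requested size;
 * ptr[112] (size + 5) is granule-aligned and still inside the 128-byte
 * object; ptr[128] (size + 16 + 5) is past the object entirely.
 */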

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);

	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules, and thus the middle and size1 offsets should be
	 * inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}
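
/*
 * Worked example of the granule checks above, using the sizes from
 * krealloc_less_oob below (size1 = 235, size2 = 201) and assuming
 * KASAN_GRANULE_SIZE == 16: middle = 201 + (235 - 201) / 2 = 218;
 * round_up(201, 16) = 208 <= round_down(218, 16) = 208, and
 * round_up(218, 16) = 224 <= round_down(235, 16) = 224, so offsets
 * 218, 234, and 235 all sit in granules past the shrunken 201-byte
 * object.
 */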

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only
 * check the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}
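
/*
 * For instance, in kmalloc_oob_memset_2 above the write starts at the last
 * valid byte (ptr + size - 1) and covers 2 bytes, so exactly one byte is
 * valid and one is out of bounds; the wider variants shift the start back
 * so that every memset still straddles the object boundary.
 */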

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}
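
/*
 * Note on the arithmetic above: invalid_size is a size_t, so the value -2
 * wraps around to SIZE_MAX - 1. The remaining modes treat such a length as
 * a negative size and report it, which is what the test expects.
 */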

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
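
/*
 * For reference, the two declarations mentioned above read as follows:
 *
 *	volatile char *array;	// pointer to volatile char: the *data* may
 *				// change behind the compiler's back
 *	char *volatile array;	// volatile pointer to char: the *pointer*
 *				// itself may change, so the compiler can't
 *				// assume it still points into global_array
 */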

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));

	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to a zeroed byte.
	 * Skip the metadata that could be stored in a freed object so ptr
	 * will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}
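
/*
 * Worked example for a 64-bit kernel: kzalloc(sizeof(long) + 1) requests
 * 9 bytes, which the allocator rounds up to a 16-byte slab object, so the
 * stray writes stay within the object and corrupt nothing else. Bit
 * BITS_PER_LONG (64) itself lies in byte 8, the last valid byte, but the
 * bitops touch the whole second long (bytes 8-15), which crosses into the
 * redzone. Bit 64 + 8 lies in byte 9, already past the 9 valid bytes.
 */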

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}
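
/*
 * Why offset 3100 is safe to probe, assuming 4K pages: vmalloc(3000)
 * reserves one whole 4096-byte page plus a guard page after it. Offset
 * 3100 is past the 3000 requested bytes (so KASAN must report it) but
 * still inside the mapped page, so the MMU never sees a fault.
 */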

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);

	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");