X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=lib%2Ftest_kasan.c;h=cacbbbdef768d056d010ebde123353192f502c50;hb=57143f2e5b41f3e51b13d3c358e29a932334110d;hp=785e724ce0d8881b780ca4602d2b871d90ff9093;hpb=6c0029211382011af508273c4fc98a732f841d95;p=linux-2.6-microblaze.git

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 785e724ce0d8..cacbbbdef768 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -54,6 +54,10 @@ static int kasan_test_init(struct kunit *test)
 
         multishot = kasan_save_enable_multi_shot();
         kasan_set_tagging_report_once(false);
+        fail_data.report_found = false;
+        fail_data.report_expected = false;
+        kunit_add_named_resource(test, NULL, NULL, &resource,
+                                "kasan_data", &fail_data);
         return 0;
 }
 
@@ -61,6 +65,7 @@ static void kasan_test_exit(struct kunit *test)
 {
         kasan_set_tagging_report_once(true);
         kasan_restore_multi_shot(multishot);
+        KUNIT_EXPECT_FALSE(test, fail_data.report_found);
 }
 
 /**
@@ -78,33 +83,31 @@ static void kasan_test_exit(struct kunit *test)
  * fields, it can reorder or optimize away the accesses to those fields.
  * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
  * expression to prevent that.
+ *
+ * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
+ * false. This allows detecting KASAN reports that happen outside of the checks
+ * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
+ * and in kasan_test_exit.
  */
-#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {          \
-        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                 \
-            !kasan_async_mode_enabled())                        \
-                migrate_disable();                              \
-        WRITE_ONCE(fail_data.report_expected, true);            \
-        WRITE_ONCE(fail_data.report_found, false);              \
-        kunit_add_named_resource(test,                          \
-                                NULL,                           \
-                                NULL,                           \
-                                &resource,                      \
-                                "kasan_data", &fail_data);      \
-        barrier();                                              \
-        expression;                                             \
-        barrier();                                              \
-        if (kasan_async_mode_enabled())                         \
-                kasan_force_async_fault();                      \
-        barrier();                                              \
-        KUNIT_EXPECT_EQ(test,                                   \
-                        READ_ONCE(fail_data.report_expected),   \
-                        READ_ONCE(fail_data.report_found));     \
-        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                 \
-            !kasan_async_mode_enabled()) {                      \
-                if (READ_ONCE(fail_data.report_found))          \
-                        kasan_enable_tagging_sync();            \
-                migrate_enable();                               \
-        }                                                       \
+#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {                  \
+        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                         \
+            !kasan_async_mode_enabled())                                \
+                migrate_disable();                                      \
+        KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));    \
+        WRITE_ONCE(fail_data.report_expected, true);                    \
+        barrier();                                                      \
+        expression;                                                     \
+        barrier();                                                      \
+        KUNIT_EXPECT_EQ(test,                                           \
+                        READ_ONCE(fail_data.report_expected),           \
+                        READ_ONCE(fail_data.report_found));             \
+        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {                         \
+                if (READ_ONCE(fail_data.report_found))                  \
+                        kasan_enable_tagging_sync();                    \
+                migrate_enable();                                       \
+        }                                                               \
+        WRITE_ONCE(fail_data.report_found, false);                      \
+        WRITE_ONCE(fail_data.report_expected, false);                   \
 } while (0)
 
 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {                   \
@@ -651,8 +654,20 @@ static char global_array[10];
 
 static void kasan_global_oob(struct kunit *test)
 {
-        volatile int i = 3;
-        char *p = &global_array[ARRAY_SIZE(global_array) + i];
+        /*
+         * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
+         * from failing here and panicking the kernel, access the array via a
+         * volatile pointer, which will prevent the compiler from being able to
+         * determine the array bounds.
+         *
+         * This access uses a volatile pointer to char (char *volatile) rather
+         * than the more conventional pointer to volatile char (volatile char *)
+         * because we want to prevent the compiler from making inferences about
+         * the pointer itself (i.e. its array bounds), not the data that it
+         * refers to.
+         */
+        char *volatile array = global_array;
+        char *p = &array[ARRAY_SIZE(global_array) + 3];
 
         /* Only generic mode instruments globals. */
         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -700,8 +715,9 @@ static void ksize_uaf(struct kunit *test)
 static void kasan_stack_oob(struct kunit *test)
 {
         char stack_array[10];
-        volatile int i = OOB_TAG_OFF;
-        char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
+        /* See comment in kasan_global_oob. */
+        char *volatile array = stack_array;
+        char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
 
         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
 
@@ -712,7 +728,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
 {
         volatile int i = 10;
         char alloca_array[i];
-        char *p = alloca_array - 1;
+        /* See comment in kasan_global_oob. */
+        char *volatile array = alloca_array;
+        char *p = array - 1;
 
         /* Only generic mode instruments dynamic allocas. */
         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -725,7 +743,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
 {
         volatile int i = 10;
         char alloca_array[i];
-        char *p = alloca_array + i;
+        /* See comment in kasan_global_oob. */
+        char *volatile array = alloca_array;
+        char *p = array + i;
 
         /* Only generic mode instruments dynamic allocas. */
         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -1049,14 +1069,14 @@ static void match_all_mem_tag(struct kunit *test)
                         continue;
 
                 /* Mark the first memory granule with the chosen memory tag. */
-                kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);
+                kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
 
                 /* This access must cause a KASAN report. */
                 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
         }
 
         /* Recover the memory tag and free. */
-        kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
+        kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
         kfree(ptr);
 }
 
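An illustrative aside, not part of the patch above: the reworked KUNIT_EXPECT_KASAN_FAIL() keeps fail_data.report_found false outside of the checked expression, so a stray KASAN report is caught either at the start of the next check or by the KUNIT_EXPECT_FALSE() added to kasan_test_exit(). A minimal userspace model of that bookkeeping, with a hypothetical fake_kasan_report() and plain assert() standing in for the real KASAN and KUnit machinery, could look like this:

/* Simplified model only; report_found stands in for fail_data.report_found. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool report_found;

/* Stand-in for KASAN producing a report. */
static void fake_kasan_report(void)
{
        report_found = true;
}

/*
 * The flag must be clear on entry (no report leaked from earlier code),
 * must be set by the checked expression, and is cleared again afterwards
 * so that a later stray report can still be detected.
 */
#define EXPECT_KASAN_FAIL(expression) do {      \
        assert(!report_found);                  \
        expression;                             \
        assert(report_found);                   \
        report_found = false;                   \
} while (0)

int main(void)
{
        EXPECT_KASAN_FAIL(fake_kasan_report());

        /* Mirrors the KUNIT_EXPECT_FALSE() check in kasan_test_exit(). */
        assert(!report_found);
        printf("no stray reports detected\n");
        return 0;
}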
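A second illustrative aside, also not part of the patch: the comment added to kasan_global_oob() distinguishes a volatile pointer to char (char *volatile) from the more familiar pointer to volatile char (volatile char *). The userspace sketch below, with illustrative variable names, shows the two declarations side by side; only the second one hides the pointer's target from the compiler, which is what the tests rely on to keep their deliberate out-of-bounds accesses from being flagged at compile time.

#include <stdio.h>

static char global_array[10];

int main(void)
{
        /*
         * Pointer to volatile char: the pointed-to bytes are volatile, but
         * the pointer value itself is known at compile time, so the compiler
         * can still infer that global_array has only 10 valid elements.
         */
        volatile char *to_volatile_data = global_array;

        /*
         * Volatile pointer to char: the pointer itself must be re-read on
         * every use, so the compiler can no longer assume which object it
         * points into and cannot prove that an index is out of bounds.
         */
        char *volatile volatile_pointer = global_array;

        (void)to_volatile_data;
        printf("array of %zu bytes at %p\n",
               sizeof(global_array), (void *)volatile_pointer);
        return 0;
}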