diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 26a5c90..ad88023 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/vmalloc.h>
+#include <linux/set_memory.h>
 
 #include <asm/page.h>
 
@@ -36,7 +37,7 @@ void *kasan_ptr_result;
 int kasan_int_result;
 
 static struct kunit_resource resource;
-static struct kunit_kasan_expectation fail_data;
+static struct kunit_kasan_status test_status;
 static bool multishot;
 
 /*
@@ -53,58 +54,63 @@ static int kasan_test_init(struct kunit *test)
        }
 
        multishot = kasan_save_enable_multi_shot();
-       fail_data.report_found = false;
+       test_status.report_found = false;
+       test_status.sync_fault = false;
        kunit_add_named_resource(test, NULL, NULL, &resource,
-                                       "kasan_data", &fail_data);
+                                       "kasan_status", &test_status);
        return 0;
 }
 
 static void kasan_test_exit(struct kunit *test)
 {
        kasan_restore_multi_shot(multishot);
-       KUNIT_EXPECT_FALSE(test, fail_data.report_found);
+       KUNIT_EXPECT_FALSE(test, test_status.report_found);
 }
 
 /**
  * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
  * KASAN report; causes a test failure otherwise. This relies on a KUnit
- * resource named "kasan_data". Do not use this name for KUnit resources
+ * resource named "kasan_status". Do not use this name for KUnit resources
  * outside of KASAN tests.
  *
- * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
+ * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
  * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
  * this handler disables migration (preemption).
  *
- * Since the compiler doesn't see that the expression can change the fail_data
+ * Since the compiler doesn't see that the expression can change the test_status
  * fields, it can reorder or optimize away the accesses to those fields.
  * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
  * expression to prevent that.
  *
- * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
- * false. This allows detecting KASAN reports that happen outside of the checks
- * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
- * and in kasan_test_exit.
+ * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
+ * as false. This allows detecting KASAN reports that happen outside of the
+ * checks by asserting !test_status.report_found at the start of
+ * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
  */
 #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {                 \
        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                         \
            kasan_sync_fault_possible())                                \
                migrate_disable();                                      \
-       KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));    \
+       KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));  \
        barrier();                                                      \
        expression;                                                     \
        barrier();                                                      \
-       if (!READ_ONCE(fail_data.report_found)) {                       \
+       if (kasan_async_fault_possible())                               \
+               kasan_force_async_fault();                              \
+       if (!READ_ONCE(test_status.report_found)) {                     \
                KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "  \
                                "expected in \"" #expression            \
                                 "\", but none occurred");              \
        }                                                               \
-       if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {                         \
-               if (READ_ONCE(fail_data.report_found))                  \
-                       kasan_enable_tagging_sync();                    \
+       if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                         \
+           kasan_sync_fault_possible()) {                              \
+               if (READ_ONCE(test_status.report_found) &&              \
+                   READ_ONCE(test_status.sync_fault))                  \
+                       kasan_enable_tagging();                         \
                migrate_enable();                                       \
        }                                                               \
-       WRITE_ONCE(fail_data.report_found, false);                      \
+       WRITE_ONCE(test_status.report_found, false);                    \
 } while (0)
 
 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {                  \
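For reference, a minimal sketch of how a test case in this file invokes the macro, modeled on the existing out-of-bounds tests; the allocation size and offset here are illustrative, not part of this patch:

/* Minimal sketch of a KUNIT_EXPECT_KASAN_FAIL() call site, modeled on
 * the out-of-bounds tests elsewhere in this file; the size and offset
 * are illustrative. */
static void kmalloc_oob_sketch(struct kunit *test)
{
	char *ptr;
	size_t size = 128;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* The faulting access is the "expression" that the macro wraps
	 * with barriers and the report_found checks described above. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);

	kfree(ptr);
}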
@@ -780,7 +786,7 @@ static void ksize_uaf(struct kunit *test)
 static void kasan_stack_oob(struct kunit *test)
 {
        char stack_array[10];
-       /* See comment in kasan_global_oob. */
+       /* See comment in kasan_global_oob_right. */
        char *volatile array = stack_array;
        char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
 
@@ -793,7 +799,7 @@ static void kasan_alloca_oob_left(struct kunit *test)
 {
        volatile int i = 10;
        char alloca_array[i];
-       /* See comment in kasan_global_oob. */
+       /* See comment in kasan_global_oob_right. */
        char *volatile array = alloca_array;
        char *p = array - 1;
 
@@ -808,7 +814,7 @@ static void kasan_alloca_oob_right(struct kunit *test)
 {
        volatile int i = 10;
        char alloca_array[i];
-       /* See comment in kasan_global_oob. */
+       /* See comment in kasan_global_oob_right. */
        char *volatile array = alloca_array;
        char *p = array + i;
 
@@ -869,11 +875,14 @@ static void kmem_cache_invalid_free(struct kunit *test)
        kmem_cache_destroy(cache);
 }
 
+static void empty_cache_ctor(void *object) { }
+
 static void kmem_cache_double_destroy(struct kunit *test)
 {
        struct kmem_cache *cache;
 
-       cache = kmem_cache_create("test_cache", 200, 0, 0, NULL);
+       /* Provide a constructor to prevent cache merging. */
+       cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
        kmem_cache_destroy(cache);
        KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
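The constructor works because the slab allocator refuses to merge caches that have one. A hedged paraphrase of the relevant check, slab_unmergeable() in mm/slab_common.c (simplified, and not compilable outside mm/ since struct kmem_cache is private there; the real function also inspects flags and usercopy regions):

/* Simplified paraphrase of slab_unmergeable(), mm/slab_common.c */
static int slab_unmergeable_sketch(struct kmem_cache *s)
{
	if (s->ctor)		/* test_cache now sets empty_cache_ctor */
		return 1;	/* never merged: destroy hits this cache */
	return 0;		/* simplified; more conditions in reality */
}

Without the constructor, "test_cache" could be merged with another 200-byte cache, and the double kmem_cache_destroy() would operate on a cache the test does not own.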
@@ -1054,21 +1063,186 @@ static void kmalloc_double_kzfree(struct kunit *test)
        KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
 }
 
+static void vmalloc_helpers_tags(struct kunit *test)
+{
+       void *ptr;
+
+       /* This test is intended for tag-based modes. */
+       KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+       KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+
+       ptr = vmalloc(PAGE_SIZE);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+       /* Check that the returned pointer is tagged. */
+       KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+       KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+
+       /* Make sure exported vmalloc helpers handle tagged pointers. */
+       KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));
+
+#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
+       {
+               int rv;
+
+               /* Make sure vmalloc'ed memory permissions can be changed. */
+               rv = set_memory_ro((unsigned long)ptr, 1);
+               KUNIT_ASSERT_GE(test, rv, 0);
+               rv = set_memory_rw((unsigned long)ptr, 1);
+               KUNIT_ASSERT_GE(test, rv, 0);
+       }
+#endif
+
+       vfree(ptr);
+}
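The tag checks assume the tag-based scheme in which the tag lives in the pointer's top byte (arm64 Top Byte Ignore). A rough sketch of what get_tag() reduces to; the actual definition in mm/kasan/kasan.h goes through arch_kasan_get_tag():

/* Rough equivalent of get_tag() for arm64 tag-based KASAN: the tag
 * occupies pointer bits 63:56. A value in [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL), where KASAN_TAG_KERNEL is the 0xff match-all
 * tag, means a real tag was assigned. */
static inline u8 get_tag_sketch(const void *ptr)
{
	return (u8)((u64)ptr >> 56);
}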
+
 static void vmalloc_oob(struct kunit *test)
 {
-       void *area;
+       char *v_ptr, *p_ptr;
+       struct page *page;
+       size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
 
        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
 
+       v_ptr = vmalloc(size);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
+
+       OPTIMIZER_HIDE_VAR(v_ptr);
+
        /*
-        * We have to be careful not to hit the guard page.
+        * We have to be careful not to hit the guard page in vmalloc tests.
         * The MMU will catch that and crash us.
         */
-       area = vmalloc(3000);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);
 
-       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
-       vfree(area);
+       /* Make sure in-bounds accesses are valid. */
+       v_ptr[0] = 0;
+       v_ptr[size - 1] = 0;
+
+       /*
+        * An unaligned access past the requested vmalloc size.
+        * Only generic KASAN can precisely detect these.
+        */
+       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+               KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
+
+       /* An aligned access into the first out-of-bounds granule. */
+       KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
+
+       /* Check that in-bounds accesses to the physical page are valid. */
+       page = vmalloc_to_page(v_ptr);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
+       p_ptr = page_address(page);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
+       p_ptr[0] = 0;
+
+       vfree(v_ptr);
+
+       /*
+        * We can't check for use-after-unmap bugs in this or the following
+        * vmalloc tests, as the page might be fully unmapped and accessing it
+        * will crash the kernel.
+        */
+}
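To make the granule arithmetic concrete, here are the two accesses with typical values, a PAGE_SIZE of 4096 and a KASAN_GRANULE_SIZE of 8 (both are configuration-dependent):

/*
 * size            = 4096/2 - 8 - 5 = 2035; valid bytes are [0, 2035).
 * v_ptr[size]     = byte 2035, inside the partially-valid granule
 *                   [2032, 2040): only generic KASAN tracks sub-granule
 *                   validity, hence the IS_ENABLED() guard above.
 * v_ptr[size + 5] = byte 2040, the first byte of the first granule that
 *                   is out of bounds in full, so all modes report it.
 */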
+
+static void vmap_tags(struct kunit *test)
+{
+       char *p_ptr, *v_ptr;
+       struct page *p_page, *v_page;
+
+       /*
+        * This test is specifically crafted for the software tag-based mode,
+        * the only tag-based mode that poisons vmap mappings.
+        */
+       KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
+
+       KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+
+       p_page = alloc_pages(GFP_KERNEL, 1);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
+       p_ptr = page_address(p_page);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
+
+       v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
+
+       /*
+        * We can't check for out-of-bounds bugs in this or the following
+        * vmalloc tests, as allocations have page granularity and accessing
+        * the guard page will crash the kernel.
+        */
+
+       KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
+       KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
+
+       /* Make sure that in-bounds accesses through both pointers work. */
+       *p_ptr = 0;
+       *v_ptr = 0;
+
+       /* Make sure vmalloc_to_page() correctly recovers the page pointer. */
+       v_page = vmalloc_to_page(v_ptr);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
+       KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);
+
+       vunmap(v_ptr);
+       free_pages((unsigned long)p_ptr, 1);
+}
+
+static void vm_map_ram_tags(struct kunit *test)
+{
+       char *p_ptr, *v_ptr;
+       struct page *page;
+
+       /*
+        * This test is specifically crafted for the software tag-based mode,
+        * the only tag-based mode that poisons vm_map_ram mappings.
+        */
+       KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
+
+       page = alloc_pages(GFP_KERNEL, 1);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
+       p_ptr = page_address(page);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
+
+       v_ptr = vm_map_ram(&page, 1, -1);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
+
+       KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
+       KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
+
+       /* Make sure that in-bounds accesses through both pointers work. */
+       *p_ptr = 0;
+       *v_ptr = 0;
+
+       vm_unmap_ram(v_ptr, 1);
+       free_pages((unsigned long)p_ptr, 1);
+}
+
+static void vmalloc_percpu(struct kunit *test)
+{
+       char __percpu *ptr;
+       int cpu;
+
+       /*
+        * This test is specifically crafted for the software tag-based mode,
+        * the only tag-based mode that poisons percpu mappings.
+        */
+       KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
+
+       ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+       for_each_possible_cpu(cpu) {
+               char *c_ptr = per_cpu_ptr(ptr, cpu);
+
+               KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
+               KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);
+
+               /* Make sure that in-bounds accesses don't crash the kernel. */
+               *c_ptr = 0;
+       }
+
+       free_percpu(ptr);
 }
 
 /*
@@ -1102,6 +1276,18 @@ static void match_all_not_assigned(struct kunit *test)
                KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
                free_pages((unsigned long)ptr, order);
        }
+
+       if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+               return;
+
+       for (i = 0; i < 256; i++) {
+               size = (get_random_int() % 1024) + 1;
+               ptr = vmalloc(size);
+               KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+               KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+               KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+               vfree(ptr);
+       }
 }
 
 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
@@ -1207,7 +1393,11 @@ static struct kunit_case kasan_kunit_test_cases[] = {
        KUNIT_CASE(kasan_bitops_generic),
        KUNIT_CASE(kasan_bitops_tags),
        KUNIT_CASE(kmalloc_double_kzfree),
+       KUNIT_CASE(vmalloc_helpers_tags),
        KUNIT_CASE(vmalloc_oob),
+       KUNIT_CASE(vmap_tags),
+       KUNIT_CASE(vm_map_ram_tags),
+       KUNIT_CASE(vmalloc_percpu),
        KUNIT_CASE(match_all_not_assigned),
        KUNIT_CASE(match_all_ptr_tag),
        KUNIT_CASE(match_all_mem_tag),
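For completeness, each entry in this table comes from KUNIT_CASE() (include/kunit/test.h), which expands to a struct kunit_case initializer, approximately:

/* What each KUNIT_CASE(name) entry above expands to, approximately: */
#define KUNIT_CASE_SKETCH(test_name) \
	{ .run_case = test_name, .name = #test_name }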