diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 950fd37..b251676 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -1,24 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * This file contains common generic and tag-based KASAN code.
+ * This file contains common KASAN code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some code borrowed from https://github.com/xairy/kasan-prototype by
  *        Andrey Konovalov <andreyknvl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
-#include <linux/kmemleak.h>
 #include <linux/linkage.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/vmalloc.h>
 #include <linux/bug.h>
 
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
 #include "kasan.h"
 #include "../slab.h"
 
@@ -56,6 +46,7 @@ void kasan_set_track(struct kasan_track *track, gfp_t flags)
        track->stack = kasan_save_stack(flags);
 }
 
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 void kasan_enable_current(void)
 {
        current->kasan_depth++;
@@ -65,106 +56,20 @@ void kasan_disable_current(void)
 {
        current->kasan_depth--;
 }
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
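
The depth counter above only exists in the software KASAN modes, hence the new #if guard. A minimal sketch of how the guard is typically used (illustrative pattern, not taken from this diff; the pointer name is hypothetical): reports are suppressed for accesses made while the depth is non-zero.

        /*
         * Illustrative only: bracket an access that intentionally touches
         * poisoned memory so that it does not produce a KASAN report.
         */
        kasan_disable_current();
        val = READ_ONCE(*shadowed_ptr);
        kasan_enable_current();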
 
-bool __kasan_check_read(const volatile void *p, unsigned int size)
-{
-       return check_memory_region((unsigned long)p, size, false, _RET_IP_);
-}
-EXPORT_SYMBOL(__kasan_check_read);
-
-bool __kasan_check_write(const volatile void *p, unsigned int size)
-{
-       return check_memory_region((unsigned long)p, size, true, _RET_IP_);
-}
-EXPORT_SYMBOL(__kasan_check_write);
-
-#undef memset
-void *memset(void *addr, int c, size_t len)
-{
-       if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
-               return NULL;
-
-       return __memset(addr, c, len);
-}
-
-#ifdef __HAVE_ARCH_MEMMOVE
-#undef memmove
-void *memmove(void *dest, const void *src, size_t len)
-{
-       if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
-           !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
-               return NULL;
-
-       return __memmove(dest, src, len);
-}
-#endif
-
-#undef memcpy
-void *memcpy(void *dest, const void *src, size_t len)
+void __kasan_unpoison_range(const void *address, size_t size)
 {
-       if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
-           !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
-               return NULL;
-
-       return __memcpy(dest, src, len);
-}
-
-/*
- * Poisons the shadow memory for 'size' bytes starting from 'addr'.
- * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
- */
-void kasan_poison_shadow(const void *address, size_t size, u8 value)
-{
-       void *shadow_start, *shadow_end;
-
-       /*
-        * Perform shadow offset calculation based on untagged address, as
-        * some of the callers (e.g. kasan_poison_object_data) pass tagged
-        * addresses to this function.
-        */
-       address = reset_tag(address);
-
-       shadow_start = kasan_mem_to_shadow(address);
-       shadow_end = kasan_mem_to_shadow(address + size);
-
-       __memset(shadow_start, value, shadow_end - shadow_start);
-}
-
-void kasan_unpoison_shadow(const void *address, size_t size)
-{
-       u8 tag = get_tag(address);
-
-       /*
-        * Perform shadow offset calculation based on untagged address, as
-        * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
-        * addresses to this function.
-        */
-       address = reset_tag(address);
-
-       kasan_poison_shadow(address, size, tag);
-
-       if (size & KASAN_SHADOW_MASK) {
-               u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-
-               if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-                       *shadow = tag;
-               else
-                       *shadow = size & KASAN_SHADOW_MASK;
-       }
-}
-
-static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
-{
-       void *base = task_stack_page(task);
-       size_t size = sp - base;
-
-       kasan_unpoison_shadow(base, size);
+       unpoison_range(address, size);
 }
 
+#if CONFIG_KASAN_STACK
 /* Unpoison the entire stack for a task. */
 void kasan_unpoison_task_stack(struct task_struct *task)
 {
-       __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
+       void *base = task_stack_page(task);
+
+       unpoison_range(base, THREAD_SIZE);
 }
 
 /* Unpoison the stack for the current task beyond a watermark sp value. */
@@ -177,10 +82,22 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
 
-       kasan_unpoison_shadow(base, watermark - base);
+       unpoison_range(base, watermark - base);
+}
+#endif /* CONFIG_KASAN_STACK */
+
+/*
+ * Only allow cache merging when stack collection is disabled and no metadata
+ * is present.
+ */
+slab_flags_t __kasan_never_merge(void)
+{
+       if (kasan_stack_collection_enabled())
+               return SLAB_KASAN;
+       return 0;
 }
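
A hypothetical condensation of the caller-side effect (the real check lives in the slab core's cache-merging path; the exact expression there may differ): any flag returned by this helper is treated as a never-merge flag.

        /* Hypothetical condensation of the merge decision: */
        if (cache->flags & (SLAB_NEVER_MERGE | kasan_never_merge()))
                return NULL;    /* the cache is never merged */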
 
-void kasan_alloc_pages(struct page *page, unsigned int order)
+void __kasan_alloc_pages(struct page *page, unsigned int order)
 {
        u8 tag;
        unsigned long i;
@@ -191,13 +108,13 @@ void kasan_alloc_pages(struct page *page, unsigned int order)
        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
-       kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+       unpoison_range(page_address(page), PAGE_SIZE << order);
 }
 
-void kasan_free_pages(struct page *page, unsigned int order)
+void __kasan_free_pages(struct page *page, unsigned int order)
 {
        if (likely(!PageHighMem(page)))
-               kasan_poison_shadow(page_address(page),
+               poison_range(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
 }
@@ -208,9 +125,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
  */
 static inline unsigned int optimal_redzone(unsigned int object_size)
 {
-       if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-               return 0;
-
        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
@@ -221,88 +135,129 @@ static inline unsigned int optimal_redzone(unsigned int object_size)
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
 }
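
A few worked examples for the ladder above, using only the rungs visible in this hunk:

        /*
         *   object_size = 48    -> 48 <= 64 - 16          -> redzone = 16
         *   object_size = 96    -> 96 <= 128 - 32         -> redzone = 32
         *   object_size = 70000 -> above (1 << 16) - 1024 -> redzone = 2048
         */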
 
-void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
-                       slab_flags_t *flags)
+void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+                         slab_flags_t *flags)
 {
-       unsigned int orig_size = *size;
-       unsigned int redzone_size;
-       int redzone_adjust;
+       unsigned int ok_size;
+       unsigned int optimal_size;
 
-       /* Add alloc meta. */
-       cache->kasan_info.alloc_meta_offset = *size;
-       *size += sizeof(struct kasan_alloc_meta);
+       /*
+        * SLAB_KASAN is used to mark caches as ones that are sanitized by
+        * KASAN. Currently this flag is used in two places:
+        * 1. In slab_ksize() when calculating the size of the accessible
+        *    memory within the object.
+        * 2. In slab_common.c to prevent merging of sanitized caches.
+        */
+       *flags |= SLAB_KASAN;
 
-       /* Add free meta. */
-       if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-           (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
-            cache->object_size < sizeof(struct kasan_free_meta))) {
-               cache->kasan_info.free_meta_offset = *size;
-               *size += sizeof(struct kasan_free_meta);
-       }
+       if (!kasan_stack_collection_enabled())
+               return;
 
-       redzone_size = optimal_redzone(cache->object_size);
-       redzone_adjust = redzone_size - (*size - cache->object_size);
-       if (redzone_adjust > 0)
-               *size += redzone_adjust;
+       ok_size = *size;
 
-       *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
-                       max(*size, cache->object_size + redzone_size));
+       /* Add alloc meta into redzone. */
+       cache->kasan_info.alloc_meta_offset = *size;
+       *size += sizeof(struct kasan_alloc_meta);
 
        /*
-        * If the metadata doesn't fit, don't enable KASAN at all.
+        * If alloc meta doesn't fit, don't add it.
+        * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
+        * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
+        * larger sizes.
         */
-       if (*size <= cache->kasan_info.alloc_meta_offset ||
-                       *size <= cache->kasan_info.free_meta_offset) {
+       if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.alloc_meta_offset = 0;
-               cache->kasan_info.free_meta_offset = 0;
-               *size = orig_size;
+               *size = ok_size;
+               /* Continue, since free meta might still fit. */
+       }
+
+       /* Only the generic mode uses free meta or flexible redzones. */
+       if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+               cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                return;
        }
 
-       *flags |= SLAB_KASAN;
+       /*
+        * Add free meta into redzone when it's not possible to store
+        * it in the object. This is the case when:
+        * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
+        *    be touched after it was freed, or
+        * 2. Object has a constructor, which means it's expected to
+        *    retain its content until the next allocation, or
+        * 3. Object is too small.
+        * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
+        */
+       if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
+           cache->object_size < sizeof(struct kasan_free_meta)) {
+               ok_size = *size;
+
+               cache->kasan_info.free_meta_offset = *size;
+               *size += sizeof(struct kasan_free_meta);
+
+               /* If free meta doesn't fit, don't add it. */
+               if (*size > KMALLOC_MAX_SIZE) {
+                       cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+                       *size = ok_size;
+               }
+       }
+
+       /* Calculate size with optimal redzone. */
+       optimal_size = cache->object_size + optimal_redzone(cache->object_size);
+       /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
+       if (optimal_size > KMALLOC_MAX_SIZE)
+               optimal_size = KMALLOC_MAX_SIZE;
+       /* Use optimal size if the size with added metas is not large enough. */
+       if (*size < optimal_size)
+               *size = optimal_size;
 }
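
To make the sizing concrete, consider a hypothetical generic-mode cache with stack collection enabled, object_size = 24, no constructor and no SLAB_TYPESAFE_BY_RCU, assuming *size starts equal to object_size and that sizeof(struct kasan_alloc_meta) == 16 and sizeof(struct kasan_free_meta) == 32 (both sizes are illustrative assumptions; the free meta is only bounded by the BUILD_BUG_ON below):

        /*
         * Walk-through under the assumptions above:
         *   alloc_meta_offset = 24, *size = 24 + 16 = 40
         *   object_size (24) < sizeof(struct kasan_free_meta), so
         *   free_meta_offset  = 40, *size = 40 + 32 = 72
         *   optimal_size = 24 + optimal_redzone(24) = 24 + 16 = 40 < 72
         * Final layout: | object (24) | alloc meta (16) | free meta (32) |
         */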
 
-size_t kasan_metadata_size(struct kmem_cache *cache)
+size_t __kasan_metadata_size(struct kmem_cache *cache)
 {
+       if (!kasan_stack_collection_enabled())
+               return 0;
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
 }
 
-struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
-                                       const void *object)
+struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
+                                             const void *object)
 {
-       return (void *)object + cache->kasan_info.alloc_meta_offset;
+       if (!cache->kasan_info.alloc_meta_offset)
+               return NULL;
+       return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
 }
 
-struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
-                                     const void *object)
+#ifdef CONFIG_KASAN_GENERIC
+struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
+                                           const void *object)
 {
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
-       return (void *)object + cache->kasan_info.free_meta_offset;
+       if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
+               return NULL;
+       return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
 }
+#endif
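
Since both accessors can now return NULL when the corresponding metadata was not reserved, callers are expected to check the result, as set_alloc_info() further down in this file does:

        struct kasan_alloc_meta *alloc_meta = kasan_get_alloc_meta(cache, object);

        if (alloc_meta)
                kasan_set_track(&alloc_meta->alloc_track, flags);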
 
-void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct page *page)
 {
        unsigned long i;
 
        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
-       kasan_poison_shadow(page_address(page), page_size(page),
-                       KASAN_KMALLOC_REDZONE);
+       poison_range(page_address(page), page_size(page),
+                    KASAN_KMALLOC_REDZONE);
 }
 
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 {
-       kasan_unpoison_shadow(object, cache->object_size);
+       unpoison_range(object, cache->object_size);
 }
 
-void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
 {
-       kasan_poison_shadow(object,
-                       round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
-                       KASAN_KMALLOC_REDZONE);
+       poison_range(object, cache->object_size, KASAN_KMALLOC_REDZONE);
 }
 
 /*
@@ -322,6 +277,9 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 static u8 assign_tag(struct kmem_cache *cache, const void *object,
                        bool init, bool keep_tag)
 {
+       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+               return 0xff;
+
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
@@ -351,50 +309,32 @@ static u8 assign_tag(struct kmem_cache *cache, const void *object,
 #endif
 }
 
-void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
 {
-       struct kasan_alloc_meta *alloc_info;
+       struct kasan_alloc_meta *alloc_meta;
 
-       if (!(cache->flags & SLAB_KASAN))
-               return (void *)object;
-
-       alloc_info = get_alloc_info(cache, object);
-       __memset(alloc_info, 0, sizeof(*alloc_info));
+       if (kasan_stack_collection_enabled()) {
+               alloc_meta = kasan_get_alloc_meta(cache, object);
+               if (alloc_meta)
+                       __memset(alloc_meta, 0, sizeof(*alloc_meta));
+       }
 
-       if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-               object = set_tag(object,
-                               assign_tag(cache, object, true, false));
+       /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
+       object = set_tag(object, assign_tag(cache, object, true, false));
 
        return (void *)object;
 }
 
-static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
-{
-       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
-               return shadow_byte < 0 ||
-                       shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
-
-       /* else CONFIG_KASAN_SW_TAGS: */
-       if ((u8)shadow_byte == KASAN_TAG_INVALID)
-               return true;
-       if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
-               return true;
-
-       return false;
-}
-
-static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
+static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
 {
-       s8 shadow_byte;
        u8 tag;
        void *tagged_object;
-       unsigned long rounded_up_size;
 
        tag = get_tag(object);
        tagged_object = object;
-       object = reset_tag(object);
+       object = kasan_reset_tag(object);
 
        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
@@ -406,37 +346,67 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;
 
-       shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
-       if (shadow_invalid(tag, shadow_byte)) {
+       if (check_invalid_free(tagged_object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }
 
-       rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
-       kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+       poison_range(object, cache->object_size, KASAN_KMALLOC_FREE);
 
-       if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
-                       unlikely(!(cache->flags & SLAB_KASAN)))
+       if (!kasan_stack_collection_enabled())
+               return false;
+
+       if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
                return false;
 
        kasan_set_free_info(cache, object, tag);
 
-       quarantine_put(get_free_info(cache, object), cache);
+       return quarantine_put(cache, object);
+}
 
-       return IS_ENABLED(CONFIG_KASAN_GENERIC);
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
+{
+       return ____kasan_slab_free(cache, object, ip, true);
 }
 
-bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 {
-       return __kasan_slab_free(cache, object, ip, true);
+       struct page *page;
+
+       page = virt_to_head_page(ptr);
+
+       /*
+        * Even though this function is only called for kmem_cache_alloc and
+        * kmalloc backed mempool allocations, those allocations can still be
+        * !PageSlab() when the size provided to kmalloc is larger than
+        * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+        */
+       if (unlikely(!PageSlab(page))) {
+               if (ptr != page_address(page)) {
+                       kasan_report_invalid_free(ptr, ip);
+                       return;
+               }
+               poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
+       } else {
+               ____kasan_slab_free(page->slab_cache, ptr, ip, false);
+       }
+}
+
+static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
+{
+       struct kasan_alloc_meta *alloc_meta;
+
+       alloc_meta = kasan_get_alloc_meta(cache, object);
+       if (alloc_meta)
+               kasan_set_track(&alloc_meta->alloc_track, flags);
 }
 
-static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
+static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags, bool keep_tag)
 {
        unsigned long redzone_start;
        unsigned long redzone_end;
-       u8 tag = 0xff;
+       u8 tag;
 
        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();
@@ -445,38 +415,36 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                return NULL;
 
        redzone_start = round_up((unsigned long)(object + size),
-                               KASAN_SHADOW_SCALE_SIZE);
+                               KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
-                               KASAN_SHADOW_SCALE_SIZE);
-
-       if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-               tag = assign_tag(cache, object, false, keep_tag);
+                               KASAN_GRANULE_SIZE);
+       tag = assign_tag(cache, object, false, keep_tag);
 
-       /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
-       kasan_unpoison_shadow(set_tag(object, tag), size);
-       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
-               KASAN_KMALLOC_REDZONE);
+       /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
+       unpoison_range(set_tag(object, tag), size);
+       poison_range((void *)redzone_start, redzone_end - redzone_start,
+                    KASAN_KMALLOC_REDZONE);
 
-       if (cache->flags & SLAB_KASAN)
-               kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);
+       if (kasan_stack_collection_enabled())
+               set_alloc_info(cache, (void *)object, flags);
 
        return set_tag(object, tag);
 }
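
A worked example of the rounding above, assuming KASAN_GRANULE_SIZE == 8, a cache with object_size == 32 and a kmalloc() request of size == 20 (the concrete numbers are illustrative):

        /*
         *   redzone_start = round_up(object + 20, 8) = object + 24
         *   redzone_end   = round_up(object + 32, 8) = object + 32
         * Bytes [object, object + 20) are unpoisoned (the granule covering
         * bytes 16..23 is marked as partially accessible) and bytes
         * [object + 24, object + 32) are poisoned as KASAN_KMALLOC_REDZONE.
         */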
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
-                                       gfp_t flags)
+void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
+                                       void *object, gfp_t flags)
 {
-       return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+       return ____kasan_kmalloc(cache, object, cache->object_size, flags, false);
 }
 
-void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
-                               size_t size, gfp_t flags)
+void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
+                                       size_t size, gfp_t flags)
 {
-       return __kasan_kmalloc(cache, object, size, flags, true);
+       return ____kasan_kmalloc(cache, object, size, flags, true);
 }
-EXPORT_SYMBOL(kasan_kmalloc);
+EXPORT_SYMBOL(__kasan_kmalloc);
 
-void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
 {
        struct page *page;
@@ -491,17 +459,17 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 
        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
-                               KASAN_SHADOW_SCALE_SIZE);
+                               KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(page);
 
-       kasan_unpoison_shadow(ptr, size);
-       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
-               KASAN_PAGE_REDZONE);
+       unpoison_range(ptr, size);
+       poison_range((void *)redzone_start, redzone_end - redzone_start,
+                    KASAN_PAGE_REDZONE);
 
        return (void *)ptr;
 }
 
-void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
+void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
        struct page *page;
 
@@ -511,421 +479,15 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
        page = virt_to_head_page(object);
 
        if (unlikely(!PageSlab(page)))
-               return kasan_kmalloc_large(object, size, flags);
+               return __kasan_kmalloc_large(object, size, flags);
        else
-               return __kasan_kmalloc(page->slab_cache, object, size,
+               return ____kasan_kmalloc(page->slab_cache, object, size,
                                                flags, true);
 }
 
-void kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-       struct page *page;
-
-       page = virt_to_head_page(ptr);
-
-       if (unlikely(!PageSlab(page))) {
-               if (ptr != page_address(page)) {
-                       kasan_report_invalid_free(ptr, ip);
-                       return;
-               }
-               kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
-       } else {
-               __kasan_slab_free(page->slab_cache, ptr, ip, false);
-       }
-}
-
-void kasan_kfree_large(void *ptr, unsigned long ip)
+void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
-       /* The object will be poisoned by page_alloc. */
-}
-
-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size)
-{
-       void *ret;
-       size_t scaled_size;
-       size_t shadow_size;
-       unsigned long shadow_start;
-
-       shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-       scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
-       shadow_size = round_up(scaled_size, PAGE_SIZE);
-
-       if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
-               return -EINVAL;
-
-       ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
-                       shadow_start + shadow_size,
-                       GFP_KERNEL,
-                       PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
-                       __builtin_return_address(0));
-
-       if (ret) {
-               __memset(ret, KASAN_SHADOW_INIT, shadow_size);
-               find_vm_area(addr)->flags |= VM_KASAN;
-               kmemleak_ignore(ret);
-               return 0;
-       }
-
-       return -ENOMEM;
-}
-
-void kasan_free_shadow(const struct vm_struct *vm)
-{
-       if (vm->flags & VM_KASAN)
-               vfree(kasan_mem_to_shadow(vm->addr));
-}
-#endif
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-static bool shadow_mapped(unsigned long addr)
-{
-       pgd_t *pgd = pgd_offset_k(addr);
-       p4d_t *p4d;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-
-       if (pgd_none(*pgd))
-               return false;
-       p4d = p4d_offset(pgd, addr);
-       if (p4d_none(*p4d))
-               return false;
-       pud = pud_offset(p4d, addr);
-       if (pud_none(*pud))
-               return false;
-
-       /*
-        * We can't use pud_large() or pud_huge(), the first one is
-        * arch-specific, the last one depends on HUGETLB_PAGE.  So let's abuse
-        * pud_bad(), if pud is bad then it's bad because it's huge.
-        */
-       if (pud_bad(*pud))
-               return true;
-       pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
-               return false;
-
-       if (pmd_bad(*pmd))
-               return true;
-       pte = pte_offset_kernel(pmd, addr);
-       return !pte_none(*pte);
-}
-
-static int __meminit kasan_mem_notifier(struct notifier_block *nb,
-                       unsigned long action, void *data)
-{
-       struct memory_notify *mem_data = data;
-       unsigned long nr_shadow_pages, start_kaddr, shadow_start;
-       unsigned long shadow_end, shadow_size;
-
-       nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
-       start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
-       shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
-       shadow_size = nr_shadow_pages << PAGE_SHIFT;
-       shadow_end = shadow_start + shadow_size;
-
-       if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
-               WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
-               return NOTIFY_BAD;
-
-       switch (action) {
-       case MEM_GOING_ONLINE: {
-               void *ret;
-
-               /*
-                * If shadow is mapped already than it must have been mapped
-                * during the boot. This could happen if we onlining previously
-                * offlined memory.
-                */
-               if (shadow_mapped(shadow_start))
-                       return NOTIFY_OK;
-
-               ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
-                                       shadow_end, GFP_KERNEL,
-                                       PAGE_KERNEL, VM_NO_GUARD,
-                                       pfn_to_nid(mem_data->start_pfn),
-                                       __builtin_return_address(0));
-               if (!ret)
-                       return NOTIFY_BAD;
-
-               kmemleak_ignore(ret);
-               return NOTIFY_OK;
-       }
-       case MEM_CANCEL_ONLINE:
-       case MEM_OFFLINE: {
-               struct vm_struct *vm;
-
-               /*
-                * shadow_start was either mapped during boot by kasan_init()
-                * or during memory online by __vmalloc_node_range().
-                * In the latter case we can use vfree() to free shadow.
-                * Non-NULL result of the find_vm_area() will tell us if
-                * that was the second case.
-                *
-                * Currently it's not possible to free shadow mapped
-                * during boot by kasan_init(). It's because the code
-                * to do that hasn't been written yet. So we'll just
-                * leak the memory.
-                */
-               vm = find_vm_area((void *)shadow_start);
-               if (vm)
-                       vfree((void *)shadow_start);
-       }
-       }
-
-       return NOTIFY_OK;
-}
-
-static int __init kasan_memhotplug_init(void)
-{
-       hotplug_memory_notifier(kasan_mem_notifier, 0);
-
-       return 0;
-}
-
-core_initcall(kasan_memhotplug_init);
-#endif
-
-#ifdef CONFIG_KASAN_VMALLOC
-static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
-                                     void *unused)
-{
-       unsigned long page;
-       pte_t pte;
-
-       if (likely(!pte_none(*ptep)))
-               return 0;
-
-       page = __get_free_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
-
-       memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
-       pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
-
-       spin_lock(&init_mm.page_table_lock);
-       if (likely(pte_none(*ptep))) {
-               set_pte_at(&init_mm, addr, ptep, pte);
-               page = 0;
-       }
-       spin_unlock(&init_mm.page_table_lock);
-       if (page)
-               free_page(page);
-       return 0;
-}
-
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
-{
-       unsigned long shadow_start, shadow_end;
-       int ret;
-
-       if (!is_vmalloc_or_module_addr((void *)addr))
-               return 0;
-
-       shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
-       shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
-       shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
-       shadow_end = ALIGN(shadow_end, PAGE_SIZE);
-
-       ret = apply_to_page_range(&init_mm, shadow_start,
-                                 shadow_end - shadow_start,
-                                 kasan_populate_vmalloc_pte, NULL);
-       if (ret)
-               return ret;
-
-       flush_cache_vmap(shadow_start, shadow_end);
-
-       /*
-        * We need to be careful about inter-cpu effects here. Consider:
-        *
-        *   CPU#0                                CPU#1
-        * WRITE_ONCE(p, vmalloc(100));         while (x = READ_ONCE(p)) ;
-        *                                      p[99] = 1;
-        *
-        * With compiler instrumentation, that ends up looking like this:
-        *
-        *   CPU#0                                CPU#1
-        * // vmalloc() allocates memory
-        * // let a = area->addr
-        * // we reach kasan_populate_vmalloc
-        * // and call kasan_unpoison_shadow:
-        * STORE shadow(a), unpoison_val
-        * ...
-        * STORE shadow(a+99), unpoison_val     x = LOAD p
-        * // rest of vmalloc process           <data dependency>
-        * STORE p, a                           LOAD shadow(x+99)
-        *
-        * If there is no barrier between the end of unpoisioning the shadow
-        * and the store of the result to p, the stores could be committed
-        * in a different order by CPU#0, and CPU#1 could erroneously observe
-        * poison in the shadow.
-        *
-        * We need some sort of barrier between the stores.
-        *
-        * In the vmalloc() case, this is provided by a smp_wmb() in
-        * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
-        * get_vm_area() and friends, the caller gets shadow allocated but
-        * doesn't have any pages mapped into the virtual address space that
-        * has been reserved. Mapping those pages in will involve taking and
-        * releasing a page-table lock, which will provide the barrier.
-        */
-
-       return 0;
-}
-
-/*
- * Poison the shadow for a vmalloc region. Called as part of the
- * freeing process at the time the region is freed.
- */
-void kasan_poison_vmalloc(const void *start, unsigned long size)
-{
-       if (!is_vmalloc_or_module_addr(start))
-               return;
-
-       size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
-       kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
-}
-
-void kasan_unpoison_vmalloc(const void *start, unsigned long size)
-{
-       if (!is_vmalloc_or_module_addr(start))
-               return;
-
-       kasan_unpoison_shadow(start, size);
+       /* The object will be poisoned by kasan_free_pages(). */
 }
-
-static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
-                                       void *unused)
-{
-       unsigned long page;
-
-       page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);
-
-       spin_lock(&init_mm.page_table_lock);
-
-       if (likely(!pte_none(*ptep))) {
-               pte_clear(&init_mm, addr, ptep);
-               free_page(page);
-       }
-       spin_unlock(&init_mm.page_table_lock);
-
-       return 0;
-}
-
-/*
- * Release the backing for the vmalloc region [start, end), which
- * lies within the free region [free_region_start, free_region_end).
- *
- * This can be run lazily, long after the region was freed. It runs
- * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
- * infrastructure.
- *
- * How does this work?
- * -------------------
- *
- * We have a region that is page aligned, labelled as A.
- * That might not map onto the shadow in a way that is page-aligned:
- *
- *                    start                     end
- *                    v                         v
- * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
- *  -------- -------- --------          -------- --------
- *      |        |       |                 |        |
- *      |        |       |         /-------/        |
- *      \-------\|/------/         |/---------------/
- *              |||                ||
- *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
- *                 (1)      (2)      (3)
- *
- * First we align the start upwards and the end downwards, so that the
- * shadow of the region aligns with shadow page boundaries. In the
- * example, this gives us the shadow page (2). This is the shadow entirely
- * covered by this allocation.
- *
- * Then we have the tricky bits. We want to know if we can free the
- * partially covered shadow pages - (1) and (3) in the example. For this,
- * we are given the start and end of the free region that contains this
- * allocation. Extending our previous example, we could have:
- *
- *  free_region_start                                    free_region_end
- *  |                 start                     end      |
- *  v                 v                         v        v
- * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
- *  -------- -------- --------          -------- --------
- *      |        |       |                 |        |
- *      |        |       |         /-------/        |
- *      \-------\|/------/         |/---------------/
- *              |||                ||
- *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
- *                 (1)      (2)      (3)
- *
- * Once again, we align the start of the free region up, and the end of
- * the free region down so that the shadow is page aligned. So we can free
- * page (1) - we know no allocation currently uses anything in that page,
- * because all of it is in the vmalloc free region. But we cannot free
- * page (3), because we can't be sure that the rest of it is unused.
- *
- * We only consider pages that contain part of the original region for
- * freeing: we don't try to free other pages from the free region or we'd
- * end up trying to free huge chunks of virtual address space.
- *
- * Concurrency
- * -----------
- *
- * How do we know that we're not freeing a page that is simultaneously
- * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
- *
- * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
- * at the same time. While we run under free_vmap_area_lock, the population
- * code does not.
- *
- * free_vmap_area_lock instead operates to ensure that the larger range
- * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
- * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
- * no space identified as free will become used while we are running. This
- * means that so long as we are careful with alignment and only free shadow
- * pages entirely covered by the free region, we will not run in to any
- * trouble - any simultaneous allocations will be for disjoint regions.
- */
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
-                          unsigned long free_region_start,
-                          unsigned long free_region_end)
-{
-       void *shadow_start, *shadow_end;
-       unsigned long region_start, region_end;
-       unsigned long size;
-
-       region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
-       region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
-
-       free_region_start = ALIGN(free_region_start,
-                                 PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
-
-       if (start != region_start &&
-           free_region_start < region_start)
-               region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
-
-       free_region_end = ALIGN_DOWN(free_region_end,
-                                    PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
-
-       if (end != region_end &&
-           free_region_end > region_end)
-               region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
-
-       shadow_start = kasan_mem_to_shadow((void *)region_start);
-       shadow_end = kasan_mem_to_shadow((void *)region_end);
-
-       if (shadow_end > shadow_start) {
-               size = shadow_end - shadow_start;
-               apply_to_existing_page_range(&init_mm,
-                                            (unsigned long)shadow_start,
-                                            size, kasan_depopulate_vmalloc_pte,
-                                            NULL);
-               flush_tlb_kernel_range((unsigned long)shadow_start,
-                                      (unsigned long)shadow_end);
-       }
-}
-#endif