* Andrey Konovalov <andreyknvl@gmail.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
if (unlikely(shadow_value)) {
- s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+ s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
return unlikely(last_accessible_byte >= shadow_value);
}
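(Aside, not part of the patch: generic KASAN encodes each 8-byte granule in one shadow byte; 0 means fully accessible, 1..7 means only the first N bytes are, and negative values mark poisoned memory. A walk-through of the check above with hypothetical values:)

/*
 * shadow_value = 5 (first 5 bytes of the granule accessible):
 *   addr & KASAN_GRANULE_MASK == 3 -> 3 >= 5 is false -> access is valid
 *   addr & KASAN_GRANULE_MASK == 6 -> 6 >= 5 is true  -> report
 * shadow_value = -5 (freed/redzone): every in-granule offset 0..7 is
 * >= a negative s8, so the access is always reported.
 */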
* Access crosses an 8-byte (granule-sized) boundary. Such an access
* maps into 2 shadow bytes, so we need to check them both.
*/
- if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+ if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
return memory_is_poisoned_1(addr + size - 1);
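(Illustrative sketch, not from the patch: the crossing test above extracted into a stand-alone helper with a hard-coded 8-byte granule, so the arithmetic can be checked in user space.)

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define GRANULE_MASK 7UL	/* mirrors KASAN_GRANULE_MASK */

/* True when a size-byte access at addr spans two 8-byte granules. */
static bool crosses_granule(uintptr_t addr, size_t size)
{
	return ((addr + size - 1) & GRANULE_MASK) < size - 1;
}

/*
 * crosses_granule(0x1006, 4) is true: bytes 0x1006..0x1009 touch granules
 * 0x1000 and 0x1008; crosses_granule(0x1004, 4) is false.
 */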
u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
/* Unaligned 16-byte access maps into 3 shadow bytes. */
- if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+ if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
return *shadow_addr || memory_is_poisoned_1(addr + 15);
return *shadow_addr;
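(Aside: an aligned 16-byte access covers exactly two granules, so the single u16 shadow load above checks both at once. With an assumed misalignment of 4:)

/*
 * addr & KASAN_GRANULE_MASK == 4: bytes addr..addr+15 fall into three
 * granules (starting at addr-4, addr+4, and addr+12). The u16 load covers
 * the first two shadow bytes; memory_is_poisoned_1(addr + 15) checks the
 * third.
 */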
s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
if (unlikely(ret != (unsigned long)last_shadow ||
- ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+ ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
return true;
}
return false;
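(Aside: in the full memory_is_poisoned_n(), ret is what memory_is_nonzero() returned for the shadow range: the address of the first nonzero shadow byte, or 0. A hypothetical walk-through of the tail check:)

/*
 * size = 11 at a granule-aligned addr: the access covers granules g and
 * g+1, and last_byte = addr + 10 has in-granule offset 2.
 *   shadow[g] = 0, shadow[g+1] = 3: ret == last_shadow, 2 >= 3 is false,
 *   so the partially accessible trailing granule admits the access.
 *   shadow[g] = 0, shadow[g+1] = 2: 2 >= 2 -> report.
 * Any nonzero shadow byte before the last one makes ret != last_shadow,
 * which reports unconditionally.
 */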
return check_memory_region_inline(addr, size, write, ret_ip);
}
+bool check_invalid_free(void *addr)
+{
+ s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
+
+ return shadow_byte < 0 || shadow_byte >= KASAN_GRANULE_SIZE;
+}
+
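(Aside: a double-free walk-through; the exact marker value is an assumption here, taken from the usual shadow encoding where KASAN_KMALLOC_FREE is 0xFB.)

/*
 * Freed object: its first shadow byte is 0xFB, i.e. s8 -5, so
 * shadow_byte < 0 and the second free is reported as invalid. A live
 * object starts with shadow 0..7 (fully or partially accessible first
 * granule) and passes both tests; values >= KASAN_GRANULE_SIZE never
 * describe a valid object start and are rejected as corruption.
 */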
void kasan_cache_shrink(struct kmem_cache *cache)
{
quarantine_remove_cache(cache);
static void register_global(struct kasan_global *global)
{
- size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+ size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
- kasan_unpoison_shadow(global->beg, global->size);
+ unpoison_range(global->beg, global->size);
- kasan_poison_shadow(global->beg + aligned_size,
- global->size_with_redzone - aligned_size,
- KASAN_GLOBAL_REDZONE);
+ poison_range(global->beg + aligned_size,
+ global->size_with_redzone - aligned_size,
+ KASAN_GLOBAL_REDZONE);
}
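(Aside, with hypothetical sizes: the shadow layout produced by the two calls above.)

/*
 * global->size = 20, global->size_with_redzone = 56:
 *   aligned_size = round_up(20, 8) = 24
 *   unpoison_range(beg, 20) -> shadow bytes 0, 0, 4
 *     (20 == 2*8 + 4: the third granule is only 4 bytes accessible)
 *   poison_range(beg + 24, 32, KASAN_GLOBAL_REDZONE) -> four shadow
 *     bytes of redzone after the object.
 */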
void __asan_register_globals(struct kasan_global *globals, size_t size)
/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
- size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
rounded_up_size;
- size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+ size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
const void *left_redzone = (const void *)(addr -
KASAN_ALLOCA_REDZONE_SIZE);
const void *right_redzone = (const void *)(addr + rounded_up_size);
WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
- kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
- size - rounded_down_size);
- kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
- KASAN_ALLOCA_LEFT);
- kasan_poison_shadow(right_redzone,
- padding_size + KASAN_ALLOCA_REDZONE_SIZE,
- KASAN_ALLOCA_RIGHT);
+ unpoison_range((const void *)(addr + rounded_down_size),
+ size - rounded_down_size);
+ poison_range(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_LEFT);
+ poison_range(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);
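(Aside: a worked layout for the poisoning above, assuming addr is 32-byte aligned and KASAN_ALLOCA_REDZONE_SIZE is 32, its current kernel value.)

/*
 * size = 20:
 *   rounded_down_size = 16, rounded_up_size = 24
 *   padding_size = round_up(20, 32) - 24 = 8
 *   unpoison_range(addr + 16, 4)              -> partial granule, shadow 4
 *   poison_range(addr - 32, 32, ALLOCA_LEFT)  -> [addr-32, addr)
 *   poison_range(addr + 24, 40, ALLOCA_RIGHT) -> [addr+24, addr+64)
 * leaving [addr, addr+20) accessible between the two redzones.
 */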
if (unlikely(!stack_top || stack_top > stack_bottom))
return;
- kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
+ unpoison_range(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);
void kasan_record_aux_stack(void *addr)
{
struct page *page = kasan_addr_to_page(addr);
struct kmem_cache *cache;
- struct kasan_alloc_meta *alloc_info;
+ struct kasan_alloc_meta *alloc_meta;
void *object;
if (!(page && PageSlab(page)))
return;
cache = page->slab_cache;
object = nearest_obj(cache, page, addr);
- alloc_info = get_alloc_info(cache, object);
+ alloc_meta = kasan_get_alloc_meta(cache, object);
- alloc_info->aux_stack[1] = alloc_info->aux_stack[0];
- alloc_info->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
+ alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
+ alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}
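(Aside: the two assignments above keep a depth-2, newest-first history of auxiliary call stacks, used e.g. to show where a call_rcu() callback was queued.)

/*
 * Recording stacks A, then B, then C:
 *   after A: aux_stack = { A, 0 }
 *   after B: aux_stack = { B, A }
 *   after C: aux_stack = { C, B }   (A is discarded)
 */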
void kasan_set_free_info(struct kmem_cache *cache,
void *object, u8 tag)
{
struct kasan_free_meta *free_meta;
- free_meta = get_free_info(cache, object);
- kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
+ free_meta = kasan_get_free_meta(cache, object);
+ if (!free_meta)
+ return;
- /*
- * the object was freed and has free track set
- */
+ kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
+ /* The object was freed and has free track set. */
*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
}
struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
void *object, u8 tag)
{
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
return NULL;
- return &get_free_info(cache, object)->free_track;
+ /* Free meta must be present with KASAN_KMALLOC_FREETRACK. */
+ return &kasan_get_free_meta(cache, object)->free_track;
}
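(Aside: how the two functions above cooperate under the shadow-byte protocol this patch introduces.)

/*
 * kasan_set_free_info() writes KASAN_KMALLOC_FREETRACK into the shadow
 * only after the free stack has been stored in free_meta, so when
 * kasan_get_free_track() sees that marker it may dereference
 * kasan_get_free_meta() without a NULL check: the marker guarantees the
 * free metadata is present.
 */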