kasan: define KASAN_MEMORY_PER_SHADOW_PAGE
author	Andrey Konovalov <andreyknvl@google.com>
Tue, 22 Dec 2020 20:00:35 +0000 (12:00 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Dec 2020 20:55:06 +0000 (12:55 -0800)
Define KASAN_MEMORY_PER_SHADOW_PAGE as (KASAN_GRANULE_SIZE << PAGE_SHIFT),
which is the same as (KASAN_GRANULE_SIZE * PAGE_SIZE) for software modes
that use shadow memory, and use it across KASAN code to simplify it.
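
For reference, a minimal standalone sketch of the new constant and of the
equivalence stated above, assuming generic KASAN granules of 8 bytes
(KASAN_SHADOW_SCALE_SHIFT == 3) and 4 KB pages (PAGE_SHIFT == 12); these
concrete values are illustrative, not taken from any particular config:

	#include <stdio.h>

	#define KASAN_SHADOW_SCALE_SHIFT	3
	#define PAGE_SHIFT			12
	#define PAGE_SIZE			(1UL << PAGE_SHIFT)

	#define KASAN_GRANULE_SIZE		(1UL << KASAN_SHADOW_SCALE_SHIFT)
	#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)

	int main(void)
	{
		/* 8 << 12 == 32768: one 4 KB shadow page covers 32 KB of memory. */
		printf("%lu\n", KASAN_MEMORY_PER_SHADOW_PAGE);
		/* Same value via the multiplication form used before this patch. */
		printf("%lu\n", KASAN_GRANULE_SIZE * PAGE_SIZE);
		return 0;
	}

Both printed values are 32768, i.e. each page of shadow memory describes
32 KB of kernel memory, which is the quantity the alignment checks below
are written against.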

Link: https://lkml.kernel.org/r/8329391cfe14b5cffd3decf3b5c535b6ce21eef6.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/kasan/init.c
mm/kasan/kasan.h
mm/kasan/shadow.c

diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index 1a71eaa..bc0ad20 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -441,9 +441,8 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
        addr = (unsigned long)kasan_mem_to_shadow(start);
        end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
-       if (WARN_ON((unsigned long)start %
-                       (KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
-           WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
+       if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
+           WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
                return;
 
        for (; addr < end; addr = next) {
@@ -476,9 +475,8 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
        shadow_start = kasan_mem_to_shadow(start);
        shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
-       if (WARN_ON((unsigned long)start %
-                       (KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
-           WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
+       if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
+           WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
                return -EINVAL;
 
        ret = kasan_populate_early_shadow(shadow_start, shadow_end);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 53b095f..eec88bf 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -8,6 +8,8 @@
 #define KASAN_GRANULE_SIZE     (1UL << KASAN_SHADOW_SCALE_SHIFT)
 #define KASAN_GRANULE_MASK     (KASAN_GRANULE_SIZE - 1)
 
+#define KASAN_MEMORY_PER_SHADOW_PAGE   (KASAN_GRANULE_SIZE << PAGE_SHIFT)
+
 #define KASAN_TAG_KERNEL       0xFF /* native kernel pointers tag */
 #define KASAN_TAG_INVALID      0xFE /* inaccessible memory tag */
 #define KASAN_TAG_MAX          0xFD /* maximum value for random tags */
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 5faba87..ba84e51 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -161,7 +161,7 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
        shadow_end = shadow_start + shadow_size;
 
        if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
-               WARN_ON(start_kaddr % (KASAN_GRANULE_SIZE << PAGE_SHIFT)))
+               WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
                return NOTIFY_BAD;
 
        switch (action) {
@@ -432,22 +432,20 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
        unsigned long region_start, region_end;
        unsigned long size;
 
-       region_start = ALIGN(start, PAGE_SIZE * KASAN_GRANULE_SIZE);
-       region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_GRANULE_SIZE);
+       region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
+       region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
-       free_region_start = ALIGN(free_region_start,
-                                 PAGE_SIZE * KASAN_GRANULE_SIZE);
+       free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);
 
        if (start != region_start &&
            free_region_start < region_start)
-               region_start -= PAGE_SIZE * KASAN_GRANULE_SIZE;
+               region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;
 
-       free_region_end = ALIGN_DOWN(free_region_end,
-                                    PAGE_SIZE * KASAN_GRANULE_SIZE);
+       free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
        if (end != region_end &&
            free_region_end > region_end)
-               region_end += PAGE_SIZE * KASAN_GRANULE_SIZE;
+               region_end += KASAN_MEMORY_PER_SHADOW_PAGE;
 
        shadow_start = kasan_mem_to_shadow((void *)region_start);
        shadow_end = kasan_mem_to_shadow((void *)region_end);