Disable KASAN instrumentation of arch/arm64/kernel/stacktrace.c.
This speeds up Generic KASAN by 5-20%.
As a side-effect, KASAN is now unable to detect bugs in the stack trace
collection code. This is taken as an acceptable downside.
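For context, the cost being removed is the check the compiler inserts around
every load and store in an instrumented file; the unwinder touches a lot of
memory, so skipping those checks in stacktrace.o is where the saving comes
from. A rough, userspace-flavoured sketch of the kind of check Generic KASAN
emits (not the kernel's actual implementation; kasan_shadow_base and the
granule handling here are illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative shadow base; the real offset is a build-time constant. */
    extern int8_t *kasan_shadow_base;

    /*
     * Generic KASAN maps every 8 bytes of memory to one shadow byte:
     * 0 means the whole granule is addressable, 1..7 means only the first
     * N bytes are, and negative values mark poisoned (redzone/freed) memory.
     * The compiler emits a check along these lines before a 1-byte access.
     */
    static bool kasan_check_byte(const void *addr)
    {
            int8_t shadow = kasan_shadow_base[(uintptr_t)addr >> 3];

            return shadow == 0 || (int8_t)((uintptr_t)addr & 7) < shadow;
    }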
Also replace READ_ONCE_NOCHECK() with READ_ONCE() in stacktrace.c.
Now that the file is no longer instrumented, there is no need for the
NOCHECK variant of READ_ONCE().
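
The distinction being dropped here: READ_ONCE_NOCHECK() exists so that a
single access can bypass KASAN in an otherwise instrumented file, typically
by routing the load through a helper the sanitizer is told to skip, while
READ_ONCE() is an ordinary (instrumented) volatile access. A minimal
userspace sketch of that idea, not the kernel's actual definitions:

    #include <stdio.h>

    /* Plain once-read: a volatile access that the sanitizer instruments. */
    #define read_once(x) (*(const volatile typeof(x) *)&(x))

    /*
     * "Nocheck" variant: the same load, but done from a function that ASAN
     * is told not to instrument, so chasing an arbitrary frame pointer does
     * not trip the sanitizer.
     */
    __attribute__((no_sanitize_address, noinline))
    static unsigned long read_once_nocheck(const unsigned long *p)
    {
            return *(const volatile unsigned long *)p;
    }

    int main(void)
    {
            unsigned long frame[2] = { 0xdeadbeef, 0xcafef00d };

            printf("%lx %lx\n", read_once(frame[0]),
                   read_once_nocheck(&frame[1]));
            return 0;
    }

Once the whole file is built without instrumentation (as the Makefile hunk
below arranges), the two forms behave identically, which is why the plain
READ_ONCE() suffices.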
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Link: https://lore.kernel.org/r/c4c944a2a905e949760fbeb29258185087171708.1653317461.git.andreyknvl@google.com
Signed-off-by: Will Deacon <will@kernel.org>
 
 CFLAGS_REMOVE_syscall.o         = -fstack-protector -fstack-protector-strong
 CFLAGS_syscall.o       += -fno-stack-protector
 
+# When KASAN is enabled, a stack trace is recorded for every alloc/free, which
+# can significantly impact performance. Avoid instrumenting the stack trace
+# collection code to minimize this impact.
+KASAN_SANITIZE_stacktrace.o := n
+
 # It's not safe to invoke KCOV when portions of the kernel environment aren't
 # available or are out-of-sync with HW state. Since `noinstr` doesn't always
 # inhibit KCOV instrumentation, disable it for the entire compilation unit.
 
         * Record this frame record's values and location. The prev_fp and
         * prev_type are only meaningful to the next unwind_next() invocation.
         */
-       state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
-       state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+       state->fp = READ_ONCE(*(unsigned long *)(fp));
+       state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
        state->prev_fp = fp;
        state->prev_type = info.type;