kcsan: Avoid checking scoped accesses from nested contexts
author Marco Elver <elver@google.com>
Tue, 30 Nov 2021 11:44:11 +0000 (12:44 +0100)
committer Paul E. McKenney <paulmck@kernel.org>
Fri, 10 Dec 2021 00:42:26 +0000 (16:42 -0800)
Avoid checking scoped accesses from nested contexts (such as nested
interrupts or in scheduler code) which share the same kcsan_ctx.

This is to avoid detecting false positive races of accesses in the same
thread with currently scoped accesses: consider setting up a watchpoint
for a non-scoped (normal) access that also "conflicts" with a current
scoped access. In a nested interrupt (or in the scheduler), which shares
the same kcsan_ctx, we cannot check scoped accesses set up in the parent
context -- simply ignore them in this case.

With the introduction of kcsan_ctx::disable_scoped, we can also clean up
kcsan_check_scoped_accesses()'s recursion guard, and do not need to
modify the list's prev pointer.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
include/linux/kcsan.h
kernel/kcsan/core.c

index fc266ec..13cef34 100644 (file)
@@ -21,6 +21,7 @@
  */
 struct kcsan_ctx {
        int disable_count; /* disable counter */
+       int disable_scoped; /* disable scoped access counter */
        int atomic_next; /* number of following atomic ops */
 
        /*
index e34a171..bd359f8 100644 (file)
@@ -204,15 +204,17 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
 static noinline void kcsan_check_scoped_accesses(void)
 {
        struct kcsan_ctx *ctx = get_ctx();
-       struct list_head *prev_save = ctx->scoped_accesses.prev;
        struct kcsan_scoped_access *scoped_access;
 
-       ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
+       if (ctx->disable_scoped)
+               return;
+
+       ctx->disable_scoped++;
        list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
                check_access(scoped_access->ptr, scoped_access->size,
                             scoped_access->type, scoped_access->ip);
        }
-       ctx->scoped_accesses.prev = prev_save;
+       ctx->disable_scoped--;
 }
 
 /* Rules for generic atomic accesses. Called from fast-path. */
@@ -465,6 +467,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
                goto out;
        }
 
+       /*
+        * Avoid races of scoped accesses from nested interrupts (or scheduler).
+        * Assume setting up a watchpoint for a non-scoped (normal) access that
+        * also conflicts with a current scoped access. In a nested interrupt,
+        * which shares the context, it would check a conflicting scoped access.
+        * To avoid, disable scoped access checking.
+        */
+       ctx->disable_scoped++;
+
        /*
         * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
         * runtime is entered for every memory access, and potentially useful
@@ -578,6 +589,7 @@ out_unlock:
        if (!kcsan_interrupt_watcher)
                local_irq_restore(irq_flags);
        kcsan_restore_irqtrace(current);
+       ctx->disable_scoped--;
 out:
        user_access_restore(ua_flags);
 }