// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include "atomic.h"
#include "encoding.h"
#include "kcsan.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
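
/*
 * With the "kcsan." prefix above, the knobs can also be set on the kernel
 * command line when KCSAN is built in; the values below are only an assumed
 * example:
 *
 *	kcsan.early_enable=1 kcsan.udelay_task=100 kcsan.skip_watch=2000
 */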

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.disable_count		= 0,
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
	.scoped_accesses	= {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 *	1. if during insertion the address slot is already occupied, check if
 *	   any adjacent slots are free;
 *	2. accesses that straddle a slot boundary due to size that exceeds a
 *	   slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 *	1. excessive contention between watchpoint checks and setup;
 *	2. larger number of simultaneous watchpoints without sacrificing
 *	   performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)
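
/*
 * For instance, continuing the example above (KCSAN_CHECK_ADJACENT=1):
 * slot=9 yields SLOT_IDX_FAST indices [9, 10, 11], the same set as SLOT_IDX's
 * [10, 11, 9], just probed in address order rather than starting from the
 * primary slot.
 */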

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						       size_t size,
						       bool expect_write,
						       long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}
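
/*
 * Taken together, a slot moves through a small state machine (sketch based on
 * the helpers above): INVALID_WATCHPOINT -> encoded watchpoint (via
 * insert_watchpoint()) -> CONSUMED_WATCHPOINT (via try_consume_watchpoint()
 * or consume_watchpoint()) -> INVALID_WATCHPOINT again (via
 * remove_watchpoint()), after which the slot may be reused for a different
 * address.
 */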

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct list_head *prev_save = ctx->scoped_accesses.prev;
	struct kcsan_scoped_access *scoped_access;

	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
	ctx->scoped_accesses.prev = prev_save;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
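
/*
 * For example, with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y, a plain aligned
 * write such as "x = 0;" to a 4-byte variable is treated as atomic by the rule
 * above and is neither watched nor reported, whereas the same write to a
 * packed/unaligned field, or a plain read, still goes through the normal
 * checks. (Illustrative example; "x" is not a variable in this file.)
 */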

static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for consecutive instruction stream.
	 */
	if (is_atomic(ptr, size, type, ctx))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   prandom_u32_max(kcsan_skip_watch) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}
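
/*
 * E.g. with kcsan_skip_watch == 4000 (value used here only for illustration):
 * without randomization the next 4000 checked accesses on this CPU are
 * skipped; with CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE=y the counter is reset to a
 * value in (0, 4000], so watchpoints are not set up at a fixed stride.
 */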

static __always_inline bool kcsan_is_enabled(void)
{
	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}

static inline unsigned int get_delay(int type)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	/* For certain access types, skew the random delay to be longer. */
	unsigned int skew_delay_order =
		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

	return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
				prandom_u32_max(delay >> skew_delay_order) :
				0);
}
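
/*
 * Worked example (assuming kcsan_udelay_task == 80 and
 * CONFIG_KCSAN_DELAY_RANDOMIZE=y): a plain access in task context is delayed
 * by a value in (0, 80] microseconds, while a compound or ASSERT access is
 * delayed by a value in (40, 80], i.e. such accesses are watched for longer on
 * average.
 */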

void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	unsigned long flags;
	bool consumed;

	if (!kcsan_is_enabled())
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (get_ctx()->access_mask != 0)
		return;

	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by the
	 * kcsan_is_enabled() check, as otherwise we might erroneously trigger
	 * reports when disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_save_irqtrace(current);
		kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
			     KCSAN_REPORT_CONSUMED_WATCHPOINT,
			     watchpoint - watchpoints);
		kcsan_restore_irqtrace(current);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
	}

	if ((type & KCSAN_ACCESS_ASSERT) != 0)
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
	else
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	union {
		u8 _1;
		u16 _2;
		u32 _4;
		u64 _8;
	} expect_value;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags = 0;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled())
		goto out;

	/*
	 * Special atomic rules: unlikely to be true, so we check them here in
	 * the slow-path, and not in the fast-path in is_atomic(). Call after
	 * kcsan_is_enabled(), as we may access memory that is not yet
	 * initialized during early boot.
	 */
	if (!is_assert && kcsan_is_atomic_special(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
		goto out;
	}

	/*
	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
	 * runtime is entered for every memory access, and potentially useful
	 * information is lost if dirtied by KCSAN.
	 */
	kcsan_save_irqtrace(current);
	if (!kcsan_interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
		goto out_unlock;
	}

	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	expect_value._8 = 0;
	switch (size) {
	case 1:
		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
		break;
	case 2:
		expect_value._2 = READ_ONCE(*(const u16 *)ptr);
		break;
	case 4:
		expect_value._4 = READ_ONCE(*(const u32 *)ptr);
		break;
	case 8:
		expect_value._8 = READ_ONCE(*(const u64 *)ptr);
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
		kcsan_disable_current();
		pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
		       is_write ? "write" : "read", size, ptr,
		       watchpoint_slot((unsigned long)ptr),
		       encode_watchpoint((unsigned long)ptr, size, is_write));
		kcsan_enable_current();
	}

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	udelay(get_delay(type));

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = get_ctx()->access_mask;
	switch (size) {
	case 1:
		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
		if (access_mask)
			expect_value._1 &= (u8)access_mask;
		break;
	case 2:
		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
		if (access_mask)
			expect_value._2 &= (u16)access_mask;
		break;
	case 4:
		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
		if (access_mask)
			expect_value._4 &= (u32)access_mask;
		break;
	case 8:
		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
		if (access_mask)
			expect_value._8 &= (u64)access_mask;
		break;
	default:
		break; /* ignore; we do not diff the values */
	}

	/* Were we able to observe a value-change? */
	if (expect_value._8 != 0)
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
			     watchpoint - watchpoints);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
		if (is_assert)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
				     watchpoint - watchpoints);
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
out_unlock:
	if (!kcsan_interrupt_watcher)
		local_irq_restore(irq_flags);
	kcsan_restore_irqtrace(current);
out:
	user_access_restore(ua_flags);
}

static __always_inline void check_access(const volatile void *ptr, size_t size,
					 int type)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for 0 sized check; this comparison will be optimized out
	 * for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, watchpoint,
				       encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

		if (unlikely(should_watch(ptr, size, type, ctx)))
			kcsan_setup_watchpoint(ptr, size, type);
		else if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	BUG_ON(!in_task());

	kcsan_debugfs_init();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable) {
		pr_info("enabled early\n");
		WRITE_ONCE(kcsan_enabled, true);
	}
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * details.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
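
/*
 * Usage sketch (hypothetical caller, not part of this file): to only flag
 * concurrent modification of specific bits, a caller may set the mask around
 * an unmarked access and clear it again afterwards:
 *
 *	kcsan_set_access_mask(FLAG_BUSY);	// FLAG_BUSY is an assumed constant
 *	do_something_with(obj->flags);
 *	kcsan_set_access_mask(0);
 *
 * With a non-zero mask, only value changes in the masked bits are reported
 * (see the access_mask handling in kcsan_setup_watchpoint() above). The
 * ASSERT_EXCLUSIVE_BITS() macro in <linux/kcsan-checks.h> builds on this.
 */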

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	__kcsan_check_access(ptr, size, type);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;

	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoid requiring list_empty()
		 * in the fast-path (to avoid a READ_ONCE() and potential
		 * uaccess warning).
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	__kcsan_check_access(sa->ptr, sa->size, sa->type);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
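
/*
 * Minimal usage sketch (hypothetical caller and object, not part of this
 * file): report any concurrent access to obj->state for the duration of a
 * critical section, not just at a single instrumented instruction:
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj->state, sizeof(obj->state),
 *				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT, &sa);
 *	... critical section assuming exclusive access to obj->state ...
 *	kcsan_end_scoped_access(&sa);
 *
 * The ASSERT_EXCLUSIVE_*_SCOPED() macros in <linux/kcsan-checks.h> provide a
 * more convenient wrapper around this interface.
 */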

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)                                           \
	void __tsan_read##size(void *ptr);                                     \
	void __tsan_read##size(void *ptr)                                      \
	{                                                                      \
		check_access(ptr, size, 0);                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read##size);                                      \
	void __tsan_unaligned_read##size(void *ptr)                            \
		__alias(__tsan_read##size);                                    \
	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
	void __tsan_write##size(void *ptr);                                    \
	void __tsan_write##size(void *ptr)                                     \
	{                                                                      \
		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_write##size);                                     \
	void __tsan_unaligned_write##size(void *ptr)                           \
		__alias(__tsan_write##size);                                   \
	EXPORT_SYMBOL(__tsan_unaligned_write##size);                           \
	void __tsan_read_write##size(void *ptr);                               \
	void __tsan_read_write##size(void *ptr)                                \
	{                                                                      \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE);      \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_read_write##size);                                \
	void __tsan_unaligned_read_write##size(void *ptr)                      \
		__alias(__tsan_read_write##size);                              \
	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);
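
/*
 * Illustration (assumed example, not generated from this file): for a plain
 * 4-byte load such as "a = p->x;" in instrumented code, the compiler emits a
 * call to __tsan_read4(&p->x) for the load; a plain 8-byte store emits
 * __tsan_write8(), and a compiler-generated read-modify-write may emit
 * __tsan_read_write8().
 */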

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1], however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */

#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)                                  \
	void __tsan_volatile_read##size(void *ptr);                            \
	void __tsan_volatile_read##size(void *ptr)                             \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&            \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
			return;                                                \
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0);  \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_read##size);                             \
	void __tsan_unaligned_volatile_read##size(void *ptr)                   \
		__alias(__tsan_volatile_read##size);                           \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);                   \
	void __tsan_volatile_write##size(void *ptr);                           \
	void __tsan_volatile_write##size(void *ptr)                            \
	{                                                                      \
		const bool is_atomic = size <= sizeof(long long) &&            \
				       IS_ALIGNED((unsigned long)ptr, size);   \
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
			return;                                                \
		check_access(ptr, size,                                        \
			     KCSAN_ACCESS_WRITE |                              \
			     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0));           \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_volatile_write##size);                            \
	void __tsan_unaligned_volatile_write##size(void *ptr)                  \
		__alias(__tsan_volatile_write##size);                          \
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);
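
/*
 * Example (illustrative): READ_ONCE() and WRITE_ONCE() compile to volatile
 * accesses, so e.g. READ_ONCE() of an aligned 4-byte variable reaches
 * __tsan_volatile_read4() and is treated as atomic (or skipped entirely with
 * CONFIG_KCSAN_IGNORE_ATOMICS=y), matching the compiletime_assert_rwonce_type()
 * rule described above.
 */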

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
{
}
EXPORT_SYMBOL(__tsan_func_entry);
void __tsan_func_exit(void);
void __tsan_func_exit(void)
{
}
EXPORT_SYMBOL(__tsan_func_exit);
void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 */

#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)                                    \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);  \
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)   \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
		}                                                              \
		return __atomic_load_n(ptr, memorder);                         \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_load);                             \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
		}                                                              \
		__atomic_store_n(ptr, v, memorder);                            \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)                               \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
				     KCSAN_ACCESS_ATOMIC);                     \
		}                                                              \
		return __atomic_##op##suffix(ptr, v, memorder);                \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 *	T1: if (__atomic_load_n(&p->flag, ...)) {
 *		modify *p;
 *		p->flag = 0;
 *	    }
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */

#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)                       \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							       u##bits val, int mo, int fail_mo); \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							       u##bits val, int mo, int fail_mo) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
				     KCSAN_ACCESS_ATOMIC);                     \
		}                                                              \
		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)                                   \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo); \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo) \
	{                                                                      \
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                \
			check_access(ptr, bits / BITS_PER_BYTE,                \
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
				     KCSAN_ACCESS_ATOMIC);                     \
		}                                                              \
		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);   \
		return exp;                                                    \
	}                                                                      \
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits)                                           \
	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);                                   \
	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);                            \
	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );                              \
	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );                             \
	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );                            \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);                           \
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);                             \
	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);
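
/*
 * Illustration (assumed example): with TSAN instrumentation enabled, a call
 * such as __atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED) on a 32-bit
 * "counter" is replaced by __tsan_atomic32_fetch_add(&counter, 1,
 * __ATOMIC_RELAXED), which performs the KCSAN check and then carries out the
 * real __atomic_fetch_add().
 */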

void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
	__atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

void __tsan_atomic_signal_fence(int memorder);
void __tsan_atomic_signal_fence(int memorder) { }
EXPORT_SYMBOL(__tsan_atomic_signal_fence);