// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic stack depot for storing stack traces.
+ * Stack depot - a stack trace storage that avoids duplication.
*
- * Some debugging tools need to save stack traces of certain events which can
- * be later presented to the user. For example, KASAN needs to safe alloc and
- * free stacks for each object, but storing two stack traces per object
- * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
- * that).
- *
- * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
- * and free stacks repeat a lot, we save about 100x space.
- * Stacks are never removed from depot, so we store them contiguously one after
- * another in a contiguous memory allocation.
+ * Internally, stack depot maintains a hash table of unique stacktraces. The
+ * stack traces themselves are stored contiguously one after another in a set
+ * of separate page allocations.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
- * Based on code by Dmitry Chernenkov.
+ * Based on the code by Dmitry Chernenkov.
*/
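(For orientation only, not part of this patch: a minimal sketch of how a depot user is expected to call this API, assuming the usual stack_trace_save()/stack_depot_save()/stack_depot_print() signatures.)

	static depot_stack_handle_t record_current_stack(gfp_t gfp)
	{
		unsigned long entries[16];
		unsigned int nr_entries;

		/* Capture the current stack trace into a local buffer. */
		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

		/* Deduplicate and store it; returns 0 on failure. */
		return stack_depot_save(entries, nr_entries, gfp);
	}

	/* Later, e.g. when printing a report: */
	stack_depot_print(handle);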
+#define pr_fmt(fmt) "stackdepot: " fmt
+
+#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
+#include <linux/kmsan.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
-#include <linux/percpu.h>
+#include <linux/poison.h>
#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
-#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
-
-#define STACK_ALLOC_NULL_PROTECTION_BITS 1
-#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
-#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
-#define STACK_ALLOC_ALIGN 4
-#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
- STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
- STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
-#define STACK_ALLOC_SLABS_CAP 8192
-#define STACK_ALLOC_MAX_SLABS \
- (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
- (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
-
-/* The compact structure to store the reference to stacks. */
-union handle_parts {
- depot_stack_handle_t handle;
- struct {
- u32 slabindex : STACK_ALLOC_INDEX_BITS;
- u32 offset : STACK_ALLOC_OFFSET_BITS;
- u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
- };
-};
+#define DEPOT_POOLS_CAP 8192
+/* The pool_index is offset by 1 so the first record does not have a 0 handle. */
+#define DEPOT_MAX_POOLS \
+ (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
+ (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)
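(Illustration, not part of the patch: assuming DEPOT_STACK_ALIGN == 4 from <linux/stackdepot.h>, i.e. records aligned to 16 bytes, a handle whose pool_index field is 3 and whose offset field is 5 is resolved by depot_fetch_stack() below as

	pool   = stack_pools[3 - 1];			/* pool_index is stored off by one */
	record = (struct stack_record *)(pool + (5 << 4));	/* byte offset 80 */

and a raw handle value of 0 therefore never names a valid record.)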
+
+static bool stack_depot_disabled;
+static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
+static bool __stack_depot_early_init_passed __initdata;
+
+/* Use one hash table bucket per 16 KB of memory. */
+#define STACK_HASH_TABLE_SCALE 14
+/* Limit the number of buckets between 4K and 1M. */
+#define STACK_BUCKET_NUMBER_ORDER_MIN 12
+#define STACK_BUCKET_NUMBER_ORDER_MAX 20
+/* Initial seed for jhash2. */
+#define STACK_HASH_SEED 0x9747b28c
+
+/* Hash table of stored stack records. */
+static struct list_head *stack_table;
+/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
+static unsigned int stack_bucket_number_order;
+/* Hash mask for indexing the table. */
+static unsigned int stack_hash_mask;
-struct stack_record {
- struct stack_record *next; /* Link in the hashtable */
- u32 hash; /* Hash in the hastable */
- u32 size; /* Number of frames in the stack */
- union handle_parts handle;
- unsigned long entries[]; /* Variable-sized array of entries. */
+/* Array of memory regions that store stack records. */
+static void *stack_pools[DEPOT_MAX_POOLS];
+/* Newly allocated pool that is not yet added to stack_pools. */
+static void *new_pool;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Offset to the unused space in the currently used pool. */
+static size_t pool_offset = DEPOT_POOL_SIZE;
+/* Freelist of stack records within stack_pools. */
+static LIST_HEAD(free_stacks);
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
+
+/* Statistics counters for debugfs. */
+enum depot_counter_id {
+ DEPOT_COUNTER_REFD_ALLOCS,
+ DEPOT_COUNTER_REFD_FREES,
+ DEPOT_COUNTER_REFD_INUSE,
+ DEPOT_COUNTER_FREELIST_SIZE,
+ DEPOT_COUNTER_PERSIST_COUNT,
+ DEPOT_COUNTER_PERSIST_BYTES,
+ DEPOT_COUNTER_COUNT,
};
+static long counters[DEPOT_COUNTER_COUNT];
+static const char *const counter_names[] = {
+ [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations",
+ [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees",
+ [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use",
+ [DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size",
+ [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count",
+ [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes",
+};
+static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
-static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
-static bool __stack_depot_early_init_passed __initdata;
+static int __init disable_stack_depot(char *str)
+{
+ return kstrtobool(str, &stack_depot_disabled);
+}
+early_param("stack_depot_disable", disable_stack_depot);
-static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+void __init stack_depot_request_early_init(void)
+{
+ /* Too late to request early init now. */
+ WARN_ON(__stack_depot_early_init_passed);
-static int depot_index;
-static int next_slab_inited;
-static size_t depot_offset;
-static DEFINE_RAW_SPINLOCK(depot_lock);
+ __stack_depot_early_init_requested = true;
+}
-static bool init_stack_slab(void **prealloc)
+/* Initialize list_head's within the hash table. */
+static void init_stack_table(unsigned long entries)
{
- if (!*prealloc)
- return false;
+ unsigned long i;
+
+ for (i = 0; i < entries; i++)
+ INIT_LIST_HEAD(&stack_table[i]);
+}
+
+/* Allocates a hash table via memblock. Can only be used during early boot. */
+int __init stack_depot_early_init(void)
+{
+ unsigned long entries = 0;
+
+ /* This function must be called only once, from mm_init(). */
+ if (WARN_ON(__stack_depot_early_init_passed))
+ return 0;
+ __stack_depot_early_init_passed = true;
+
/*
- * This smp_load_acquire() pairs with smp_store_release() to
- * |next_slab_inited| below and in depot_alloc_stack().
+	 * Print the disabled message even if early init has not been requested,
+	 * as stack_depot_init() will not print one.
*/
- if (smp_load_acquire(&next_slab_inited))
- return true;
- if (stack_slabs[depot_index] == NULL) {
- stack_slabs[depot_index] = *prealloc;
- *prealloc = NULL;
- } else {
- /* If this is the last depot slab, do not touch the next one. */
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
- stack_slabs[depot_index + 1] = *prealloc;
- *prealloc = NULL;
- }
+ if (stack_depot_disabled) {
+ pr_info("disabled\n");
+ return 0;
+ }
+
+ /*
+ * If KASAN is enabled, use the maximum order: KASAN is frequently used
+ * in fuzzing scenarios, which leads to a large number of different
+ * stack traces being stored in stack depot.
+ */
+ if (kasan_enabled() && !stack_bucket_number_order)
+ stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
+
+ /*
+ * Check if early init has been requested after setting
+ * stack_bucket_number_order: stack_depot_init() uses its value.
+ */
+ if (!__stack_depot_early_init_requested)
+ return 0;
+
+ /*
+ * If stack_bucket_number_order is not set, leave entries as 0 to rely
+ * on the automatic calculations performed by alloc_large_system_hash().
+ */
+ if (stack_bucket_number_order)
+ entries = 1UL << stack_bucket_number_order;
+ pr_info("allocating hash table via alloc_large_system_hash\n");
+ stack_table = alloc_large_system_hash("stackdepot",
+ sizeof(struct list_head),
+ entries,
+ STACK_HASH_TABLE_SCALE,
+ HASH_EARLY,
+ NULL,
+ &stack_hash_mask,
+ 1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
+ 1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
+ if (!stack_table) {
+ pr_err("hash table allocation failed, disabling\n");
+ stack_depot_disabled = true;
+ return -ENOMEM;
+ }
+ if (!entries) {
/*
- * This smp_store_release pairs with smp_load_acquire() from
- * |next_slab_inited| above and in stack_depot_save().
+ * Obtain the number of entries that was calculated by
+ * alloc_large_system_hash().
*/
- smp_store_release(&next_slab_inited, 1);
+ entries = stack_hash_mask + 1;
+ }
+ init_stack_table(entries);
+
+ return 0;
+}
+
+/* Allocates a hash table via kvcalloc. Can be used after boot. */
+int stack_depot_init(void)
+{
+ static DEFINE_MUTEX(stack_depot_init_mutex);
+ unsigned long entries;
+ int ret = 0;
+
+ mutex_lock(&stack_depot_init_mutex);
+
+ if (stack_depot_disabled || stack_table)
+ goto out_unlock;
+
+ /*
+ * Similarly to stack_depot_early_init, use stack_bucket_number_order
+ * if assigned, and rely on automatic scaling otherwise.
+ */
+ if (stack_bucket_number_order) {
+ entries = 1UL << stack_bucket_number_order;
+ } else {
+ int scale = STACK_HASH_TABLE_SCALE;
+
+ entries = nr_free_buffer_pages();
+ entries = roundup_pow_of_two(entries);
+
+ if (scale > PAGE_SHIFT)
+ entries >>= (scale - PAGE_SHIFT);
+ else
+ entries <<= (PAGE_SHIFT - scale);
+ }
+
+ if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
+ entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
+ if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
+ entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
+
+ pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
+ stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
+ if (!stack_table) {
+ pr_err("hash table allocation failed, disabling\n");
+ stack_depot_disabled = true;
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ stack_hash_mask = entries - 1;
+ init_stack_table(entries);
+
+out_unlock:
+ mutex_unlock(&stack_depot_init_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stack_depot_init);
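(Illustrative numbers, assuming PAGE_SHIFT == 12: with STACK_HASH_TABLE_SCALE == 14, the non-early path above ends up with roughly one bucket per 16 KB of memory. For about 4 GB of free buffer pages, that is

	entries = roundup_pow_of_two(2^20 pages) >> (14 - 12) = 2^18

i.e. 256K buckets, which lies inside the [2^12, 2^20] clamp.)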
+
+/*
+ * Initializes a new stack pool and updates the list of pools.
+ */
+static bool depot_init_pool(void **prealloc)
+{
+ lockdep_assert_held(&pool_lock);
+
+ if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ /* Bail out if we reached the pool limit. */
+ WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
+ WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return false;
}
+
+ if (!new_pool && *prealloc) {
+ /* We have preallocated memory, use it. */
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
+ }
+
+ if (!new_pool)
+ return false; /* new_pool and *prealloc are NULL */
+
+ /* Save reference to the pool to be used by depot_fetch_stack(). */
+ stack_pools[pools_num] = new_pool;
+
+ /*
+ * Stack depot tries to keep an extra pool allocated even before it runs
+ * out of space in the currently used pool.
+ *
+	 * To indicate that a new preallocation is needed, new_pool is reset to
+ * NULL; do not reset to NULL if we have reached the maximum number of
+ * pools.
+ */
+ if (pools_num < DEPOT_MAX_POOLS)
+ WRITE_ONCE(new_pool, NULL);
+ else
+ WRITE_ONCE(new_pool, STACK_DEPOT_POISON);
+
+ /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
+ WRITE_ONCE(pools_num, pools_num + 1);
+ ASSERT_EXCLUSIVE_WRITER(pools_num);
+
+ pool_offset = 0;
+
return true;
}
-/* Allocation of a new stack in raw storage */
-static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+/* Keeps the preallocated memory to be used for a new stack depot pool. */
+static void depot_keep_new_pool(void **prealloc)
+{
+ lockdep_assert_held(&pool_lock);
+
+ /*
+ * If a new pool is already saved or the maximum number of
+ * pools is reached, do not use the preallocated memory.
+ */
+ if (new_pool)
+ return;
+
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
+}
+
+/*
+ * Try to initialize a new stack record from the current pool, a cached pool, or
+ * the current pre-allocation.
+ */
+static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
{
struct stack_record *stack;
- size_t required_size = struct_size(stack, entries, size);
+ void *current_pool;
+ u32 pool_index;
- required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+ lockdep_assert_held(&pool_lock);
- if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
- if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
+ if (pool_offset + size > DEPOT_POOL_SIZE) {
+ if (!depot_init_pool(prealloc))
return NULL;
- }
- depot_index++;
- depot_offset = 0;
- /*
- * smp_store_release() here pairs with smp_load_acquire() from
- * |next_slab_inited| in stack_depot_save() and
- * init_stack_slab().
- */
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
- smp_store_release(&next_slab_inited, 0);
}
- init_stack_slab(prealloc);
- if (stack_slabs[depot_index] == NULL)
+
+ if (WARN_ON_ONCE(pools_num < 1))
+ return NULL;
+ pool_index = pools_num - 1;
+ current_pool = stack_pools[pool_index];
+ if (WARN_ON_ONCE(!current_pool))
return NULL;
- stack = stack_slabs[depot_index] + depot_offset;
+ stack = current_pool + pool_offset;
- stack->hash = hash;
- stack->size = size;
- stack->handle.slabindex = depot_index;
- stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
- stack->handle.valid = 1;
- memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
- depot_offset += required_size;
+ /* Pre-initialize handle once. */
+ stack->handle.pool_index = pool_index + 1;
+ stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+ INIT_LIST_HEAD(&stack->hash_list);
+
+ pool_offset += size;
return stack;
}
-/* one hash table bucket entry per 16kB of memory */
-#define STACK_HASH_SCALE 14
-/* limited between 4k and 1M buckets */
-#define STACK_HASH_ORDER_MIN 12
-#define STACK_HASH_ORDER_MAX 20
-#define STACK_HASH_SEED 0x9747b28c
+/* Try to find the next free usable entry from the freelist. */
+static struct stack_record *depot_pop_free(void)
+{
+ struct stack_record *stack;
-static unsigned int stack_hash_order;
-static unsigned int stack_hash_mask;
+ lockdep_assert_held(&pool_lock);
-static bool stack_depot_disable;
-static struct stack_record **stack_table;
+ if (list_empty(&free_stacks))
+ return NULL;
-static int __init is_stack_depot_disabled(char *str)
-{
- int ret;
+ /*
+ * We maintain the invariant that the elements in front are least
+ * recently used, and are therefore more likely to be associated with an
+ * RCU grace period in the past. Consequently it is sufficient to only
+ * check the first entry.
+ */
+ stack = list_first_entry(&free_stacks, struct stack_record, free_list);
+ if (!poll_state_synchronize_rcu(stack->rcu_state))
+ return NULL;
- ret = kstrtobool(str, &stack_depot_disable);
- if (!ret && stack_depot_disable) {
- pr_info("Stack Depot is disabled\n");
- stack_table = NULL;
- }
- return 0;
+ list_del(&stack->free_list);
+ counters[DEPOT_COUNTER_FREELIST_SIZE]--;
+
+ return stack;
}
-early_param("stack_depot_disable", is_stack_depot_disabled);
-void __init stack_depot_want_early_init(void)
+static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
{
- /* Too late to request early init now */
- WARN_ON(__stack_depot_early_init_passed);
+ const size_t used = flex_array_size(s, entries, nr_entries);
+ const size_t unused = sizeof(s->entries) - used;
+
+ WARN_ON_ONCE(sizeof(s->entries) < used);
- __stack_depot_want_early_init = true;
+ return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
}
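(Worked example, assuming a 64-bit build and the default CONFIG_STACKDEPOT_MAX_FRAMES of 64, so sizeof(s->entries) == 512: for a 4-frame trace, used == 32, unused == 480, and the function returns ALIGN(sizeof(struct stack_record) - 480, 16), i.e. only the space needed for the record header plus 4 entries, rounded up to the 16-byte record alignment.)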
-int __init stack_depot_early_init(void)
+/* Allocates a new stack in a stack depot pool. */
+static struct stack_record *
+depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
{
- unsigned long entries = 0;
+ struct stack_record *stack = NULL;
+ size_t record_size;
- /* This is supposed to be called only once, from mm_init() */
- if (WARN_ON(__stack_depot_early_init_passed))
- return 0;
+ lockdep_assert_held(&pool_lock);
- __stack_depot_early_init_passed = true;
+ /* This should already be checked by public API entry points. */
+ if (WARN_ON_ONCE(!nr_entries))
+ return NULL;
- if (kasan_enabled() && !stack_hash_order)
- stack_hash_order = STACK_HASH_ORDER_MAX;
+ /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+ if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
+ nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;
- if (!__stack_depot_want_early_init || stack_depot_disable)
- return 0;
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ /*
+ * Evictable entries have to allocate the max. size so they may
+ * safely be re-used by differently sized allocations.
+ */
+ record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
+ stack = depot_pop_free();
+ } else {
+ record_size = depot_stack_record_size(stack, nr_entries);
+ }
- if (stack_hash_order)
- entries = 1UL << stack_hash_order;
- stack_table = alloc_large_system_hash("stackdepot",
- sizeof(struct stack_record *),
- entries,
- STACK_HASH_SCALE,
- HASH_EARLY | HASH_ZERO,
- NULL,
- &stack_hash_mask,
- 1UL << STACK_HASH_ORDER_MIN,
- 1UL << STACK_HASH_ORDER_MAX);
+ if (!stack) {
+ stack = depot_pop_free_pool(prealloc, record_size);
+ if (!stack)
+ return NULL;
+ }
- if (!stack_table) {
- pr_err("Stack Depot hash table allocation failed, disabling\n");
- stack_depot_disable = true;
- return -ENOMEM;
+ /* Save the stack trace. */
+ stack->hash = hash;
+ stack->size = nr_entries;
+ /* stack->handle is already filled in by depot_pop_free_pool(). */
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ refcount_set(&stack->count, 1);
+ counters[DEPOT_COUNTER_REFD_ALLOCS]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]++;
+ } else {
+ /* Warn on attempts to switch to refcounting this entry. */
+ refcount_set(&stack->count, REFCOUNT_SATURATED);
+ counters[DEPOT_COUNTER_PERSIST_COUNT]++;
+ counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
}
- return 0;
+ /*
+ * Let KMSAN know the stored stack record is initialized. This shall
+ * prevent false positive reports if instrumented code accesses it.
+ */
+ kmsan_unpoison_memory(stack, record_size);
+
+ return stack;
}
-int stack_depot_init(void)
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
{
- static DEFINE_MUTEX(stack_depot_init_mutex);
- int ret = 0;
+ const int pools_num_cached = READ_ONCE(pools_num);
+ union handle_parts parts = { .handle = handle };
+ void *pool;
+ u32 pool_index = parts.pool_index - 1;
+ size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+ struct stack_record *stack;
- mutex_lock(&stack_depot_init_mutex);
- if (!stack_depot_disable && !stack_table) {
- unsigned long entries;
- int scale = STACK_HASH_SCALE;
-
- if (stack_hash_order) {
- entries = 1UL << stack_hash_order;
- } else {
- entries = nr_free_buffer_pages();
- entries = roundup_pow_of_two(entries);
-
- if (scale > PAGE_SHIFT)
- entries >>= (scale - PAGE_SHIFT);
- else
- entries <<= (PAGE_SHIFT - scale);
- }
+ lockdep_assert_not_held(&pool_lock);
- if (entries < 1UL << STACK_HASH_ORDER_MIN)
- entries = 1UL << STACK_HASH_ORDER_MIN;
- if (entries > 1UL << STACK_HASH_ORDER_MAX)
- entries = 1UL << STACK_HASH_ORDER_MAX;
-
- pr_info("Stack Depot allocating hash table of %lu entries with kvcalloc\n",
- entries);
- stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
- if (!stack_table) {
- pr_err("Stack Depot hash table allocation failed, disabling\n");
- stack_depot_disable = true;
- ret = -ENOMEM;
- }
- stack_hash_mask = entries - 1;
+ if (pool_index >= pools_num_cached) {
+ WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+ pool_index, pools_num_cached, handle);
+ return NULL;
}
- mutex_unlock(&stack_depot_init_mutex);
- return ret;
+
+ pool = stack_pools[pool_index];
+ if (WARN_ON(!pool))
+ return NULL;
+
+ stack = pool + offset;
+ if (WARN_ON(!refcount_read(&stack->count)))
+ return NULL;
+
+ return stack;
}
-EXPORT_SYMBOL_GPL(stack_depot_init);
-/* Calculate hash for a stack */
+/* Links stack into the freelist. */
+static void depot_free_stack(struct stack_record *stack)
+{
+ unsigned long flags;
+
+ lockdep_assert_not_held(&pool_lock);
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ printk_deferred_enter();
+
+ /*
+ * Remove the entry from the hash list. Concurrent list traversal may
+ * still observe the entry, but since the refcount is zero, this entry
+	 * will no longer be considered valid.
+ */
+ list_del_rcu(&stack->hash_list);
+
+ /*
+ * Due to being used from constrained contexts such as the allocators,
+ * NMI, or even RCU itself, stack depot cannot rely on primitives that
+ * would sleep (such as synchronize_rcu()) or recursively call into
+ * stack depot again (such as call_rcu()).
+ *
+ * Instead, get an RCU cookie, so that we can ensure this entry isn't
+ * moved onto another list until the next grace period, and concurrent
+ * RCU list traversal remains safe.
+ */
+ stack->rcu_state = get_state_synchronize_rcu();
+
+ /*
+ * Add the entry to the freelist tail, so that older entries are
+ * considered first - their RCU cookie is more likely to no longer be
+ * associated with the current grace period.
+ */
+ list_add_tail(&stack->free_list, &free_stacks);
+
+ counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+ counters[DEPOT_COUNTER_REFD_FREES]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]--;
+
+ printk_deferred_exit();
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+}
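(The cookie handling above is the polled counterpart of synchronize_rcu(); a generic sketch of the idiom, not depot-specific code:

	cookie = get_state_synchronize_rcu();		/* when retiring the object */
	...
	if (poll_state_synchronize_rcu(cookie)) {
		/* A full grace period has elapsed since the cookie was taken,
		 * so no pre-existing RCU reader can still hold a reference and
		 * the object may be reused.
		 */
	}

Neither call can sleep, which is what makes the pattern usable from the allocator/NMI contexts mentioned above.)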
+
+/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
return jhash2((u32 *)entries,
STACK_HASH_SEED);
}
-/* Use our own, non-instrumented version of memcmp().
- *
- * We actually don't care about the order, just the equality.
+/*
+ * Non-instrumented version of memcmp().
+ * Does not check the lexicographical order, only the equality.
*/
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
return 0;
}
-/* Find a stack that is equal to the one stored in entries in the hash */
-static inline struct stack_record *find_stack(struct stack_record *bucket,
- unsigned long *entries, int size,
- u32 hash)
+/* Finds a stack in a bucket of the hash table. */
+static inline struct stack_record *find_stack(struct list_head *bucket,
+ unsigned long *entries, int size,
+ u32 hash, depot_flags_t flags)
{
- struct stack_record *found;
+ struct stack_record *stack, *ret = NULL;
- for (found = bucket; found; found = found->next) {
- if (found->hash == hash &&
- found->size == size &&
- !stackdepot_memcmp(entries, found->entries, size))
- return found;
- }
- return NULL;
-}
-
-/**
- * stack_depot_snprint - print stack entries from a depot into a buffer
- *
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @buf: Pointer to the print buffer
- *
- * @size: Size of the print buffer
- *
- * @spaces: Number of leading spaces to print
- *
- * Return: Number of bytes printed.
- */
-int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
- int spaces)
-{
- unsigned long *entries;
- unsigned int nr_entries;
-
- nr_entries = stack_depot_fetch(handle, &entries);
- return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
- spaces) : 0;
-}
-EXPORT_SYMBOL_GPL(stack_depot_snprint);
-
-/**
- * stack_depot_print - print stack entries from a depot
- *
- * @stack: Stack depot handle which was returned from
- * stack_depot_save().
- *
- */
-void stack_depot_print(depot_stack_handle_t stack)
-{
- unsigned long *entries;
- unsigned int nr_entries;
+ /*
+ * Stack depot may be used from instrumentation that instruments RCU or
+	 * tracing itself; use a variant that does not call into RCU and cannot be
+ * traced.
+ *
+ * Note: Such use cases must take care when using refcounting to evict
+ * unused entries, because the stack record free-then-reuse code paths
+ * do call into RCU.
+ */
+ rcu_read_lock_sched_notrace();
- nr_entries = stack_depot_fetch(stack, &entries);
- if (nr_entries > 0)
- stack_trace_print(entries, nr_entries, 0);
-}
-EXPORT_SYMBOL_GPL(stack_depot_print);
+ list_for_each_entry_rcu(stack, bucket, hash_list) {
+ if (stack->hash != hash || stack->size != size)
+ continue;
-/**
- * stack_depot_fetch - Fetch stack entries from a depot
- *
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @entries: Pointer to store the entries address
- *
- * Return: The number of trace entries for this depot.
- */
-unsigned int stack_depot_fetch(depot_stack_handle_t handle,
- unsigned long **entries)
-{
- union handle_parts parts = { .handle = handle };
- void *slab;
- size_t offset = parts.offset << STACK_ALLOC_ALIGN;
- struct stack_record *stack;
+ /*
+ * This may race with depot_free_stack() accessing the freelist
+ * management state unioned with @entries. The refcount is zero
+ * in that case and the below refcount_inc_not_zero() will fail.
+ */
+ if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
+ continue;
- *entries = NULL;
- if (!handle)
- return 0;
+ /*
+ * Try to increment refcount. If this succeeds, the stack record
+ * is valid and has not yet been freed.
+ *
+ * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior
+ * to then call stack_depot_put() later, and we can assume that
+ * a stack record is never placed back on the freelist.
+ */
+ if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
+ continue;
- if (parts.slabindex > depot_index) {
- WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
- parts.slabindex, depot_index, handle);
- return 0;
+ ret = stack;
+ break;
}
- slab = stack_slabs[parts.slabindex];
- if (!slab)
- return 0;
- stack = slab + offset;
- *entries = stack->entries;
- return stack->size;
+ rcu_read_unlock_sched_notrace();
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(stack_depot_fetch);
-/**
- * __stack_depot_save - Save a stack trace from an array
- *
- * @entries: Pointer to storage array
- * @nr_entries: Size of the storage array
- * @alloc_flags: Allocation gfp flags
- * @can_alloc: Allocate stack slabs (increased chance of failure if false)
- *
- * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, is allowed to replenish the stack slab pool in case no space is left
- * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and will fail if no space is left to store the stack trace.
- *
- * If the stack trace in @entries is from an interrupt, only the portion up to
- * interrupt entry is saved.
- *
- * Context: Any context, but setting @can_alloc to %false is required if
- * alloc_pages() cannot be used from the current context. Currently
- * this is the case from contexts where neither %GFP_ATOMIC nor
- * %GFP_NOWAIT can be used (NMI, raw_spin_lock).
- *
- * Return: The handle of the stack struct stored in depot, 0 on failure.
- */
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t alloc_flags, bool can_alloc)
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags,
+ depot_flags_t depot_flags)
{
- struct stack_record *found = NULL, **bucket;
- depot_stack_handle_t retval = 0;
+ struct list_head *bucket;
+ struct stack_record *found = NULL;
+ depot_stack_handle_t handle = 0;
struct page *page = NULL;
void *prealloc = NULL;
+ bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
unsigned long flags;
u32 hash;
+ if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
+ return 0;
+
/*
* If this stack trace is from an interrupt, including anything before
- * interrupt entry usually leads to unbounded stackdepot growth.
+ * interrupt entry usually leads to unbounded stack depot growth.
*
- * Because use of filter_irq_stacks() is a requirement to ensure
- * stackdepot can efficiently deduplicate interrupt stacks, always
- * filter_irq_stacks() to simplify all callers' use of stackdepot.
+ * Since use of filter_irq_stacks() is a requirement to ensure stack
+ * depot can efficiently deduplicate interrupt stacks, always
+ * filter_irq_stacks() to simplify all callers' use of stack depot.
*/
nr_entries = filter_irq_stacks(entries, nr_entries);
- if (unlikely(nr_entries == 0) || stack_depot_disable)
- goto fast_exit;
+ if (unlikely(nr_entries == 0) || stack_depot_disabled)
+ return 0;
hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & stack_hash_mask];
- /*
- * Fast path: look the stack trace up without locking.
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |bucket| below.
- */
- found = find_stack(smp_load_acquire(bucket), entries,
- nr_entries, hash);
+ /* Fast path: look the stack trace up without locking. */
+ found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (found)
goto exit;
/*
- * Check if the current or the next stack slab need to be initialized.
- * If so, allocate the memory - we won't be able to do that under the
- * lock.
- *
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
+ * Allocate memory for a new pool if required now:
+ * we won't be able to do that under the lock.
*/
- if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
+ if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
alloc_flags &= ~GFP_ZONEMASK;
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
alloc_flags |= __GFP_NOWARN;
- page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+ page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
if (page)
prealloc = page_address(page);
}
- raw_spin_lock_irqsave(&depot_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ printk_deferred_enter();
- found = find_stack(*bucket, entries, nr_entries, hash);
+ /* Try to find again, to avoid concurrently inserting duplicates. */
+ found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (!found) {
- struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+ struct stack_record *new =
+ depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);
if (new) {
- new->next = *bucket;
/*
- * This smp_store_release() pairs with
- * smp_load_acquire() from |bucket| above.
+ * This releases the stack record into the bucket and
+ * makes it visible to readers in find_stack().
*/
- smp_store_release(bucket, new);
+ list_add_rcu(&new->hash_list, bucket);
found = new;
}
- } else if (prealloc) {
+ }
+
+ if (prealloc) {
/*
- * We didn't need to store this stack trace, but let's keep
- * the preallocated memory for the future.
+ * Either stack depot already contains this stack trace, or
+ * depot_alloc_stack() did not consume the preallocated memory.
+ * Try to keep the preallocated memory for future.
*/
- WARN_ON(!init_stack_slab(&prealloc));
+ depot_keep_new_pool(&prealloc);
}
- raw_spin_unlock_irqrestore(&depot_lock, flags);
+ printk_deferred_exit();
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
- /* Nobody used this memory, ok to free it. */
- free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
+ /* Stack depot didn't use this memory, free it. */
+ free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
- retval = found->handle.handle;
-fast_exit:
- return retval;
+ handle = found->handle.handle;
+ return handle;
}
-EXPORT_SYMBOL_GPL(__stack_depot_save);
+EXPORT_SYMBOL_GPL(stack_depot_save_flags);
-/**
- * stack_depot_save - Save a stack trace from an array
- *
- * @entries: Pointer to storage array
- * @nr_entries: Size of the storage array
- * @alloc_flags: Allocation gfp flags
- *
- * Context: Contexts where allocations via alloc_pages() are allowed.
- * See __stack_depot_save() for more details.
- *
- * Return: The handle of the stack struct stored in depot, 0 on failure.
- */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
- return __stack_depot_save(entries, nr_entries, alloc_flags, true);
+ return stack_depot_save_flags(entries, nr_entries, alloc_flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+
+struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle)
+{
+ if (!handle)
+ return NULL;
+
+ return depot_fetch_stack(handle);
+}
+
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries)
+{
+ struct stack_record *stack;
+
+ *entries = NULL;
+ /*
+ * Let KMSAN know *entries is initialized. This shall prevent false
+ * positive reports if instrumented code accesses it.
+ */
+ kmsan_unpoison_memory(entries, sizeof(*entries));
+
+ if (!handle || stack_depot_disabled)
+ return 0;
+
+ stack = depot_fetch_stack(handle);
+ /*
+ * Should never be NULL, otherwise this is a use-after-put (or just a
+ * corrupt handle).
+ */
+ if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
+ return 0;
+
+ *entries = stack->entries;
+ return stack->size;
+}
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
+
+void stack_depot_put(depot_stack_handle_t handle)
+{
+ struct stack_record *stack;
+
+ if (!handle || stack_depot_disabled)
+ return;
+
+ stack = depot_fetch_stack(handle);
+ /*
+ * Should always be able to find the stack record, otherwise this is an
+ * unbalanced put attempt (or corrupt handle).
+ */
+ if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()"))
+ return;
+
+ if (refcount_dec_and_test(&stack->count))
+ depot_free_stack(stack);
+}
+EXPORT_SYMBOL_GPL(stack_depot_put);
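(A hypothetical refcount-aware caller, sketched under the assumption that it pairs every successful STACK_DEPOT_FLAG_GET save with exactly one stack_depot_put():

	handle = stack_depot_save_flags(entries, nr_entries, GFP_NOWAIT,
					STACK_DEPOT_FLAG_CAN_ALLOC |
					STACK_DEPOT_FLAG_GET);
	...
	stack_depot_put(handle);	/* record is freed once the count drops to zero */

Handles saved without STACK_DEPOT_FLAG_GET must never be passed to stack_depot_put().)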
+
+void stack_depot_print(depot_stack_handle_t stack)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(stack, &entries);
+ if (nr_entries > 0)
+ stack_trace_print(entries, nr_entries, 0);
+}
+EXPORT_SYMBOL_GPL(stack_depot_print);
+
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(handle, &entries);
+ return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
+ spaces) : 0;
+}
+EXPORT_SYMBOL_GPL(stack_depot_snprint);
+
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+ depot_stack_handle_t handle, unsigned int extra_bits)
+{
+ union handle_parts parts = { .handle = handle };
+
+ /* Don't set extra bits on empty handles. */
+ if (!handle)
+ return 0;
+
+ parts.extra = extra_bits;
+ return parts.handle;
+}
+EXPORT_SYMBOL(stack_depot_set_extra_bits);
+
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
+{
+ union handle_parts parts = { .handle = handle };
+
+ return parts.extra;
+}
+EXPORT_SYMBOL(stack_depot_get_extra_bits);
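(Sketch of how a caller might use the extra bits, which simply travel in otherwise unused bits of the handle; the tag value here is hypothetical:

	handle = stack_depot_set_extra_bits(handle, tag);	/* tag must fit in STACK_DEPOT_EXTRA_BITS */
	...
	tag = stack_depot_get_extra_bits(handle);

The extra bits do not affect how a handle is resolved, since pool_index and offset are separate fields of handle_parts.)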
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+ /*
+ * data race ok: These are just statistics counters, and approximate
+ * statistics are ok for debugging.
+ */
+ seq_printf(seq, "pools: %d\n", data_race(pools_num));
+ for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
+ seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stats);
+
+static int depot_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ if (stack_depot_disabled)
+ return 0;
+
+ dir = debugfs_create_dir("stackdepot", NULL);
+ debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
+ return 0;
+}
+late_initcall(depot_debugfs_init);
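(Once this initcall runs, the counters above become readable via debugfs; illustrative usage, with values shown as machine-dependent placeholders:

	# cat /sys/kernel/debug/stackdepot/stats
	pools: <n>
	refcounted_allocations: <n>
	refcounted_frees: <n>
	refcounted_in_use: <n>
	freelist_size: <n>
	persistent_count: <n>
	persistent_bytes: <n>
)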