* to its declaration for more details.
*/
#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001)
+#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002)
-#define STACK_DEPOT_FLAGS_NUM 1
+#define STACK_DEPOT_FLAGS_NUM 2
#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
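With two flags defined, STACK_DEPOT_FLAGS_MASK now evaluates to 0x0003 and covers both bits. A minimal sketch of how a save path could reject undefined flag bits; the helper name below is illustrative and not part of this patch:

#include <linux/stackdepot.h>

/* With STACK_DEPOT_FLAGS_NUM == 2, the mask is 0b11, i.e. 0x0003. */
static inline bool depot_flags_valid(depot_flags_t depot_flags)
{
	/* Reject any bit outside the two defined flags. */
	return (depot_flags & ~STACK_DEPOT_FLAGS_MASK) == 0;
}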
/*
* flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
* fails if no space is left to store the stack trace.
*
+ * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
+ * the refcount on the saved stack trace if it already exists in stack depot.
+ *
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
*
- * Context: Contexts where allocations via alloc_pages() are allowed.
- * See stack_depot_save_flags() for more details.
+ * Does not increment the refcount on the saved stack trace; see
+ * stack_depot_save_flags() for more details.
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed;
+ * see stack_depot_save_flags() for more details.
*
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
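As a usage sketch based on the parameters named in the kerneldoc above (the entries, their count, GFP flags for allocation, and depot flags), a caller that wants a reference on the stored record passes STACK_DEPOT_FLAG_GET; plain stack_depot_save(), per the comment above, never takes that reference:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_current_stack_pinned(void)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/*
	 * CAN_ALLOC: allow replenishing depot memory using GFP_KERNEL.
	 * GET: take a reference on the stored stack trace.
	 */
	return stack_depot_save_flags(entries, nr_entries, GFP_KERNEL,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

A returned handle of 0 still signals failure, as in the Return: description above.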
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>
u32 hash; /* Hash in hash table */
u32 size; /* Number of stored frames */
union handle_parts handle;
+ refcount_t count; /* Number of references to this record */
unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
};
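On the release side, the new counter would pair with refcount_dec_and_test(); a depot-internal sketch under that assumption, with the helper name made up for illustration and actual record eviction left out:

#include <linux/refcount.h>

/*
 * Drop one reference on a record and report whether it was the last one.
 * Unlinking the record from its hash bucket (not shown) would have to
 * happen under the write lock, like the other bucket updates below.
 */
static bool depot_drop_reference(struct stack_record *stack)
{
	return refcount_dec_and_test(&stack->count);
}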
stack->hash = hash;
stack->size = size;
/* stack->handle is already filled in by depot_init_pool(). */
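+ /* New records start with one reference. */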
+ refcount_set(&stack->count, 1);
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
/*
/* Fast path: look the stack trace up without full locking. */
found = find_stack(bucket, entries, nr_entries, hash);
if (found) {
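+ /* refcount_inc() is atomic, so it is safe under the shared read lock. */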
+ if (depot_flags & STACK_DEPOT_FLAG_GET)
+ refcount_inc(&found->count);
read_unlock_irqrestore(&pool_rwlock, flags);
goto exit;
}
list_add(&new->list, bucket);
found = new;
}
- } else if (prealloc) {
+ } else {
+ if (depot_flags & STACK_DEPOT_FLAG_GET)
+ refcount_inc(&found->count);
/*
* Stack depot already contains this stack trace, but let's
* keep the preallocated memory for the future.
*/
- depot_keep_new_pool(&prealloc);
+ if (prealloc)
+ depot_keep_new_pool(&prealloc);
}
write_unlock_irqrestore(&pool_rwlock, flags);
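Putting the caller-side lifecycle together: each successful save with STACK_DEPOT_FLAG_GET holds one reference on the record, so it should be balanced by a matching release. The release function is assumed here (called stack_depot_put() in this sketch) and does not appear in the hunks above:

#include <linux/gfp.h>
#include <linux/stackdepot.h>

static void stack_depot_get_lifecycle_example(unsigned long *entries,
					      unsigned int nr_entries)
{
	depot_stack_handle_t h1, h2;

	h1 = stack_depot_save_flags(entries, nr_entries, GFP_KERNEL,
				    STACK_DEPOT_FLAG_CAN_ALLOC |
				    STACK_DEPOT_FLAG_GET);
	h2 = stack_depot_save_flags(entries, nr_entries, GFP_KERNEL,
				    STACK_DEPOT_FLAG_CAN_ALLOC |
				    STACK_DEPOT_FLAG_GET);

	/* Identical trace: both calls resolve to the same record, h1 == h2. */

	if (h2)
		stack_depot_put(h2);	/* Assumed counterpart to FLAG_GET. */
	if (h1)
		stack_depot_put(h1);
}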