kfence: move saving stack trace of allocations into __kfence_alloc()
author     Marco Elver <elver@google.com>
           Fri, 5 Nov 2021 20:45:31 +0000 (13:45 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 6 Nov 2021 20:30:43 +0000 (13:30 -0700)
Move the saving of the stack trace of allocations into __kfence_alloc(),
so that the stack entries array can be used outside of
kfence_guarded_alloc() and we avoid potentially unwinding the stack
multiple times.

Link: https://lkml.kernel.org/r/20210923104803.2620285-3-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Alexander Potapenko <glider@google.com>
Cc: Aleksandr Nogikh <nogikh@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Taras Madan <tarasmadan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 249d75b..db01814 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -187,19 +187,26 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
  * Update the object's metadata state, including updating the alloc/free stacks
  * depending on the state transition.
  */
-static noinline void metadata_update_state(struct kfence_metadata *meta,
-                                          enum kfence_object_state next)
+static noinline void
+metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
+                     unsigned long *stack_entries, size_t num_stack_entries)
 {
        struct kfence_track *track =
                next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
 
        lockdep_assert_held(&meta->lock);
 
-       /*
-        * Skip over 1 (this) function; noinline ensures we do not accidentally
-        * skip over the caller by never inlining.
-        */
-       track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+       if (stack_entries) {
+               memcpy(track->stack_entries, stack_entries,
+                      num_stack_entries * sizeof(stack_entries[0]));
+       } else {
+               /*
+                * Skip over 1 (this) function; noinline ensures we do not
+                * accidentally skip over the caller by never inlining.
+                */
+               num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+       }
+       track->num_stack_entries = num_stack_entries;
        track->pid = task_pid_nr(current);
        track->cpu = raw_smp_processor_id();
        track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
@@ -261,7 +268,8 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
        }
 }
 
-static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
+static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
+                                 unsigned long *stack_entries, size_t num_stack_entries)
 {
        struct kfence_metadata *meta = NULL;
        unsigned long flags;
@@ -320,7 +328,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
        addr = (void *)meta->addr;
 
        /* Update remaining metadata. */
-       metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
+       metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
        /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
        WRITE_ONCE(meta->cache, cache);
        meta->size = size;
@@ -400,7 +408,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
                memzero_explicit(addr, meta->size);
 
        /* Mark the object as freed. */
-       metadata_update_state(meta, KFENCE_OBJECT_FREED);
+       metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
 
        raw_spin_unlock_irqrestore(&meta->lock, flags);
 
@@ -742,6 +750,9 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 
 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
+       unsigned long stack_entries[KFENCE_STACK_DEPTH];
+       size_t num_stack_entries;
+
        /*
         * Perform size check before switching kfence_allocation_gate, so that
         * we don't disable KFENCE without making an allocation.
@@ -786,7 +797,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
        if (!READ_ONCE(kfence_enabled))
                return NULL;
 
-       return kfence_guarded_alloc(s, size, flags);
+       num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
+
+       return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries);
 }
 
 size_t kfence_ksize(const void *addr)