diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1eddc01..6a540c2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
  *
  * The following locks and mutexes are used by kmemleak:
  *
- * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
- *   accesses to the object_tree_root (or object_phys_tree_root). The
- *   object_list is the main list holding the metadata (struct kmemleak_object)
- *   for the allocated memory blocks. The object_tree_root and object_phys_tree_root
- *   are red black trees used to look-up metadata based on a pointer to the
- *   corresponding memory block. The object_phys_tree_root is for objects
- *   allocated with physical address. The kmemleak_object structures are
- *   added to the object_list and object_tree_root (or object_phys_tree_root)
- *   in the create_object() function called from the kmemleak_alloc() (or
- *   kmemleak_alloc_phys()) callback and removed in delete_object() called from
- *   the kmemleak_free() callback
+ * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
+ *   del_state modifications and accesses to the object trees
+ *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
+ *   object_list is the main list holding the metadata (struct
+ *   kmemleak_object) for the allocated memory blocks. The object trees are
+ *   red-black trees used to look up metadata based on a pointer to the
+ *   corresponding memory block. The kmemleak_object structures are added to
+ *   the object_list and the object tree root in the create_object() function
+ *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
+ *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
  * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
  *   Accesses to the metadata (e.g. count) are protected by this lock. Note
  *   that some members of this structure may be protected by other means
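
For reference, a minimal sketch (not part of the patch) of the lookup pattern described above: kmemleak_lock guards the object trees and object_list, while object->lock guards the per-object metadata once a reference is held.

/* Illustrative only: read an object's reference count safely. */
static int example_object_count(unsigned long ptr)
{
        struct kmemleak_object *object;
        unsigned long flags;
        int count;

        object = find_and_get_object(ptr, 0);   /* takes kmemleak_lock internally */
        if (!object)
                return -1;
        raw_spin_lock_irqsave(&object->lock, flags);
        count = object->count;                  /* metadata under object->lock */
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
        return count;
}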
@@ -79,6 +78,7 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/memblock.h>
@@ -147,6 +147,7 @@ struct kmemleak_object {
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
+       unsigned int del_state;         /* deletion state */
        unsigned long pointer;
        size_t size;
        /* pass surplus references to this pointer */
@@ -159,8 +160,7 @@ struct kmemleak_object {
        u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
-       unsigned long trace[MAX_TRACE];
-       unsigned int trace_len;
+       depot_stack_handle_t trace_handle;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
@@ -176,6 +176,13 @@ struct kmemleak_object {
 #define OBJECT_FULL_SCAN       (1 << 3)
 /* flag set for object allocated with physical address */
 #define OBJECT_PHYS            (1 << 4)
+/* flag set for per-CPU pointers */
+#define OBJECT_PERCPU          (1 << 5)
+
+/* set when __remove_object() is called */
+#define DELSTATE_REMOVED       (1 << 0)
+/* set to temporarily prevent deletion from object_list */
+#define DELSTATE_NO_DELETE     (1 << 1)
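
Unlike object->flags, which is protected by object->lock, these del_state bits are only read and written under kmemleak_lock (see the updated lock documentation at the top of the file). A minimal sketch of a correctly locked check, mirroring kmemleak_cond_resched() further down:

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        removed = object->del_state & DELSTATE_REMOVED; /* unlinked from its tree? */
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);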
 
 #define HEX_PREFIX             "    "
 /* number of bytes to print per line; must be 16 or 32 */
@@ -199,6 +206,8 @@ static LIST_HEAD(mem_pool_free_list);
 static struct rb_root object_tree_root = RB_ROOT;
 /* search tree for object (with OBJECT_PHYS flag) boundaries */
 static struct rb_root object_phys_tree_root = RB_ROOT;
+/* search tree for object (with OBJECT_PERCPU flag) boundaries */
+static struct rb_root object_percpu_tree_root = RB_ROOT;
-/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
+/* protecting the access to object_list, del_state and the object trees */
 static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 
@@ -211,7 +220,7 @@ static int kmemleak_enabled = 1;
 /* same as above but only for the kmemleak_free() callback */
 static int kmemleak_free_enabled = 1;
 /* set in the late_initcall if there were no errors */
-static int kmemleak_initialized;
+static int kmemleak_late_initialized;
 /* set if a kmemleak warning was issued */
 static int kmemleak_warning;
 /* set if a fatal kmemleak error has occurred */
@@ -291,7 +300,7 @@ static void hex_dump_object(struct seq_file *seq,
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;
 
-       if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+       if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
                return;
 
        /* limit the number of lines to HEX_MAX_LINES */
@@ -346,19 +355,20 @@ static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
 {
        int i;
-       unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
+       unsigned long *entries;
+       unsigned int nr_entries;
 
+       nr_entries = stack_depot_fetch(object->trace_handle, &entries);
        warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
-                  object->pointer, object->size);
-       warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
-                  object->comm, object->pid, object->jiffies,
-                  msecs_age / 1000, msecs_age % 1000);
+                          object->pointer, object->size);
+       warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
+                          object->comm, object->pid, object->jiffies);
        hex_dump_object(seq, object);
-       warn_or_seq_printf(seq, "  backtrace:\n");
+       warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);
 
-       for (i = 0; i < object->trace_len; i++) {
-               void *ptr = (void *)object->trace[i];
-               warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
+       for (i = 0; i < nr_entries; i++) {
+               void *ptr = (void *)entries[i];
+               warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
        }
 }
 
@@ -370,15 +380,25 @@ static void print_unreferenced(struct seq_file *seq,
 static void dump_object_info(struct kmemleak_object *object)
 {
        pr_notice("Object 0x%08lx (size %zu):\n",
-                 object->pointer, object->size);
+                       object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
-                 object->comm, object->pid, object->jiffies);
+                       object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
-       stack_trace_print(object->trace, object->trace_len, 4);
+       if (object->trace_handle)
+               stack_depot_print(object->trace_handle);
+}
+
+static struct rb_root *object_tree(unsigned long objflags)
+{
+       if (objflags & OBJECT_PHYS)
+               return &object_phys_tree_root;
+       if (objflags & OBJECT_PERCPU)
+               return &object_percpu_tree_root;
+       return &object_tree_root;
 }
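
The object_tree() helper added above replaces the open-coded is_phys ternaries throughout the file with a single flag-based dispatch; illustratively:

        object_tree(0);                 /* &object_tree_root */
        object_tree(OBJECT_PHYS);       /* &object_phys_tree_root */
        object_tree(OBJECT_PERCPU);     /* &object_percpu_tree_root */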
 
 /*
@@ -388,10 +408,9 @@ static void dump_object_info(struct kmemleak_object *object)
  * when calling this function.
  */
 static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
-                                              bool is_phys)
+                                              unsigned int objflags)
 {
-       struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
-                            object_tree_root.rb_node;
+       struct rb_node *rb = object_tree(objflags)->rb_node;
        unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 
        while (rb) {
@@ -420,7 +439,7 @@ static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
 /* Look-up a kmemleak object which allocated with virtual address. */
 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 {
-       return __lookup_object(ptr, alias, false);
+       return __lookup_object(ptr, alias, 0);
 }
 
 /*
@@ -533,14 +552,14 @@ static void put_object(struct kmemleak_object *object)
  * Look up an object in the object search tree and increase its use_count.
  */
 static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
-                                                    bool is_phys)
+                                                    unsigned int objflags)
 {
        unsigned long flags;
        struct kmemleak_object *object;
 
        rcu_read_lock();
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
-       object = __lookup_object(ptr, alias, is_phys);
+       object = __lookup_object(ptr, alias, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
        /* check whether the object is still available */
@@ -554,65 +573,74 @@ static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alia
 /* Look up and get an object which allocated with virtual address. */
 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 {
-       return __find_and_get_object(ptr, alias, false);
+       return __find_and_get_object(ptr, alias, 0);
 }
 
 /*
- * Remove an object from the object_tree_root (or object_phys_tree_root)
- * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
- * is still enabled.
+ * Remove an object from its object tree and object_list. Must be called with
+ * the kmemleak_lock held _if_ kmemleak is still enabled.
  */
 static void __remove_object(struct kmemleak_object *object)
 {
-       rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
-                                  &object_phys_tree_root :
-                                  &object_tree_root);
-       list_del_rcu(&object->object_list);
+       rb_erase(&object->rb_node, object_tree(object->flags));
+       if (!(object->del_state & DELSTATE_NO_DELETE))
+               list_del_rcu(&object->object_list);
+       object->del_state |= DELSTATE_REMOVED;
+}
+
+static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
+                                                       int alias,
+                                                       unsigned int objflags)
+{
+       struct kmemleak_object *object;
+
+       object = __lookup_object(ptr, alias, objflags);
+       if (object)
+               __remove_object(object);
+
+       return object;
 }
 
 /*
- * Look up an object in the object search tree and remove it from both
- * object_tree_root (or object_phys_tree_root) and object_list. The
- * returned object's use_count should be at least 1, as initially set
- * by create_object().
+ * Look up an object in the object search tree and remove it from both object
+ * tree root and object_list. The returned object's use_count should be at
+ * least 1, as initially set by create_object().
  */
 static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
-                                                     bool is_phys)
+                                                     unsigned int objflags)
 {
        unsigned long flags;
        struct kmemleak_object *object;
 
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
-       object = __lookup_object(ptr, alias, is_phys);
-       if (object)
-               __remove_object(object);
+       object = __find_and_remove_object(ptr, alias, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
        return object;
 }
 
-/*
- * Save stack trace to the given array of MAX_TRACE size.
- */
-static int __save_stack_trace(unsigned long *trace)
+static noinline depot_stack_handle_t set_track_prepare(void)
 {
-       return stack_trace_save(trace, MAX_TRACE, 2);
+       depot_stack_handle_t trace_handle;
+       unsigned long entries[MAX_TRACE];
+       unsigned int nr_entries;
+
+       /*
+        * Use object_cache to determine whether kmemleak_init() has
+        * been invoked. stack_depot_early_init() is called before
+        * kmemleak_init() in mm_core_init().
+        */
+       if (!object_cache)
+               return 0;
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
+       trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+
+       return trace_handle;
 }
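
For context, a self-contained sketch (independent of this patch) of the stackdepot round trip that replaces the fixed-size trace arrays: stack_depot_save() deduplicates the trace and returns a compact handle, stack_depot_fetch() expands the handle again, and a zero handle means nothing was recorded.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t demo_record(void)
{
        unsigned long entries[16];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        /* returns 0 on failure; callers must tolerate a zero handle */
        return stack_depot_save(entries, nr_entries, GFP_NOWAIT);
}

static void demo_report(depot_stack_handle_t handle)
{
        unsigned long *entries;
        unsigned int i, nr_entries;

        nr_entries = stack_depot_fetch(handle, &entries);
        for (i = 0; i < nr_entries; i++)
                pr_info("  [<%pK>] %pS\n", (void *)entries[i], (void *)entries[i]);
}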
 
-/*
- * Create the metadata (struct kmemleak_object) corresponding to an allocated
- * memory block and add it to the object_list and object_tree_root (or
- * object_phys_tree_root).
- */
-static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
-                                            int min_count, gfp_t gfp,
-                                            bool is_phys)
+static struct kmemleak_object *__alloc_object(gfp_t gfp)
 {
-       unsigned long flags;
-       struct kmemleak_object *object, *parent;
-       struct rb_node **link, *rb_parent;
-       unsigned long untagged_ptr;
-       unsigned long untagged_objp;
+       struct kmemleak_object *object;
 
        object = mem_pool_alloc(gfp);
        if (!object) {
@@ -626,14 +654,10 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
        INIT_HLIST_HEAD(&object->area_list);
        raw_spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
-       object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
-       object->pointer = ptr;
-       object->size = kfence_ksize((void *)ptr) ?: size;
        object->excess_ref = 0;
-       object->min_count = min_count;
        object->count = 0;                      /* white color initially */
-       object->jiffies = jiffies;
        object->checksum = 0;
+       object->del_state = 0;
 
        /* task information */
        if (in_hardirq()) {
@@ -654,21 +678,36 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
        }
 
        /* kernel backtrace */
-       object->trace_len = __save_stack_trace(object->trace);
+       object->trace_handle = set_track_prepare();
 
-       raw_spin_lock_irqsave(&kmemleak_lock, flags);
+       return object;
+}
+
+static int __link_object(struct kmemleak_object *object, unsigned long ptr,
+                        size_t size, int min_count, unsigned int objflags)
+{
+       struct kmemleak_object *parent;
+       struct rb_node **link, *rb_parent;
+       unsigned long untagged_ptr;
+       unsigned long untagged_objp;
+
+       object->flags = OBJECT_ALLOCATED | objflags;
+       object->pointer = ptr;
+       object->size = kfence_ksize((void *)ptr) ?: size;
+       object->min_count = min_count;
+       object->jiffies = jiffies;
 
        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        /*
         * Only update min_addr and max_addr with object
         * storing virtual address.
         */
-       if (!is_phys) {
+       if (!(objflags & (OBJECT_PHYS | OBJECT_PERCPU))) {
                min_addr = min(min_addr, untagged_ptr);
                max_addr = max(max_addr, untagged_ptr + size);
        }
-       link = is_phys ? &object_phys_tree_root.rb_node :
-               &object_tree_root.rb_node;
+       link = &object_tree(objflags)->rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
@@ -686,33 +725,57 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
-                       kmem_cache_free(object_cache, object);
-                       object = NULL;
-                       goto out;
+                       return -EEXIST;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
-       rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
-                                         &object_tree_root);
-
+       rb_insert_color(&object->rb_node, object_tree(objflags));
        list_add_tail_rcu(&object->object_list, &object_list);
-out:
+
+       return 0;
+}
+
+/*
+ * Create the metadata (struct kmemleak_object) corresponding to an allocated
+ * memory block and add it to the object_list and object tree.
+ */
+static void __create_object(unsigned long ptr, size_t size,
+                               int min_count, gfp_t gfp, unsigned int objflags)
+{
+       struct kmemleak_object *object;
+       unsigned long flags;
+       int ret;
+
+       object = __alloc_object(gfp);
+       if (!object)
+               return;
+
+       raw_spin_lock_irqsave(&kmemleak_lock, flags);
+       ret = __link_object(object, ptr, size, min_count, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
-       return object;
+       if (ret)
+               mem_pool_free(object);
 }
 
 /* Create kmemleak object which allocated with virtual address. */
-static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
-                                            int min_count, gfp_t gfp)
+static void create_object(unsigned long ptr, size_t size,
+                         int min_count, gfp_t gfp)
 {
-       return __create_object(ptr, size, min_count, gfp, false);
+       __create_object(ptr, size, min_count, gfp, 0);
 }
 
 /* Create kmemleak object which allocated with physical address. */
-static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
-                                            int min_count, gfp_t gfp)
+static void create_object_phys(unsigned long ptr, size_t size,
+                              int min_count, gfp_t gfp)
+{
+       __create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
+}
+
+/* Create kmemleak object corresponding to a per-CPU allocation. */
+static void create_object_percpu(unsigned long ptr, size_t size,
+                                int min_count, gfp_t gfp)
 {
-       return __create_object(ptr, size, min_count, gfp, true);
+       __create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
 }
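
With a single metadata object tracked per __percpu cookie (instead of one object per CPU copy, as before), nothing changes for allocator users; a hypothetical caller, for illustration:

/* Illustrative only: alloc_percpu()/free_percpu() invoke the kmemleak
 * percpu callbacks internally; kmemleak now tracks one object per cookie.
 */
#include <linux/percpu.h>

struct foo {
        long counter;
};

static int demo_percpu(void)
{
        struct foo __percpu *p = alloc_percpu(struct foo);
        unsigned int cpu;

        if (!p)
                return -ENOMEM;         /* one kmemleak object, min_count == 0 */
        for_each_possible_cpu(cpu)
                per_cpu_ptr(p, cpu)->counter = 0;       /* init every CPU's copy */
        free_percpu(p);                 /* kmemleak metadata deleted here */
        return 0;
}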
 
 /*
@@ -739,11 +802,11 @@ static void __delete_object(struct kmemleak_object *object)
  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
  * delete it.
  */
-static void delete_object_full(unsigned long ptr)
+static void delete_object_full(unsigned long ptr, unsigned int objflags)
 {
        struct kmemleak_object *object;
 
-       object = find_and_remove_object(ptr, 0, false);
+       object = find_and_remove_object(ptr, 0, objflags);
        if (!object) {
 #ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
@@ -759,18 +822,28 @@ static void delete_object_full(unsigned long ptr)
  * delete it. If the memory block is partially freed, the function may create
  * additional metadata for the remaining parts of the block.
  */
-static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
+static void delete_object_part(unsigned long ptr, size_t size,
+                              unsigned int objflags)
 {
-       struct kmemleak_object *object;
-       unsigned long start, end;
+       struct kmemleak_object *object, *object_l, *object_r;
+       unsigned long start, end, flags;
+
+       object_l = __alloc_object(GFP_KERNEL);
+       if (!object_l)
+               return;
 
-       object = find_and_remove_object(ptr, 1, is_phys);
+       object_r = __alloc_object(GFP_KERNEL);
+       if (!object_r)
+               goto out;
+
+       raw_spin_lock_irqsave(&kmemleak_lock, flags);
+       object = __find_and_remove_object(ptr, 1, objflags);
        if (!object) {
 #ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
 #endif
-               return;
+               goto unlock;
        }
 
        /*
@@ -780,14 +853,25 @@ static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
         */
        start = object->pointer;
        end = object->pointer + object->size;
-       if (ptr > start)
-               __create_object(start, ptr - start, object->min_count,
-                             GFP_KERNEL, is_phys);
-       if (ptr + size < end)
-               __create_object(ptr + size, end - ptr - size, object->min_count,
-                             GFP_KERNEL, is_phys);
+       if ((ptr > start) &&
+           !__link_object(object_l, start, ptr - start,
+                          object->min_count, objflags))
+               object_l = NULL;
+       if ((ptr + size < end) &&
+           !__link_object(object_r, ptr + size, end - ptr - size,
+                          object->min_count, objflags))
+               object_r = NULL;
+
+unlock:
+       raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
+       if (object)
+               __delete_object(object);
 
-       __delete_object(object);
+out:
+       if (object_l)
+               mem_pool_free(object_l);
+       if (object_r)
+               mem_pool_free(object_r);
 }
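
To picture the partial free handled above: both potential leftovers are preallocated before kmemleak_lock is taken (no allocation is possible under the raw spinlock), and each is linked only if its range is non-empty:

        original object:  [start ........................................ end)
        freed range:               [ptr ........ ptr+size)
        object_l:         [start .. ptr)                 (only if ptr > start)
        object_r:                          [ptr+size .. end)   (only if ptr+size < end)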
 
 static void __paint_it(struct kmemleak_object *object, int color)
@@ -806,11 +890,11 @@ static void paint_it(struct kmemleak_object *object, int color)
        raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
-static void paint_ptr(unsigned long ptr, int color, bool is_phys)
+static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
 {
        struct kmemleak_object *object;
 
-       object = __find_and_get_object(ptr, 0, is_phys);
+       object = __find_and_get_object(ptr, 0, objflags);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
@@ -828,16 +912,16 @@ static void paint_ptr(unsigned long ptr, int color, bool is_phys)
  */
 static void make_gray_object(unsigned long ptr)
 {
-       paint_ptr(ptr, KMEMLEAK_GREY, false);
+       paint_ptr(ptr, KMEMLEAK_GREY, 0);
 }
 
 /*
  * Mark the object as black-colored so that it is ignored from scans and
  * reporting.
  */
-static void make_black_object(unsigned long ptr, bool is_phys)
+static void make_black_object(unsigned long ptr, unsigned int objflags)
 {
-       paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
+       paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
 }
 
 /*
@@ -954,7 +1038,7 @@ static void object_no_scan(unsigned long ptr)
 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
 {
-       pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
+       pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
@@ -973,18 +1057,14 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
 void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
 {
-       unsigned int cpu;
-
-       pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
+       pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
 
        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-               for_each_possible_cpu(cpu)
-                       create_object((unsigned long)per_cpu_ptr(ptr, cpu),
-                                     size, 0, gfp);
+               create_object_percpu((unsigned long)ptr, size, 0, gfp);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 
@@ -999,7 +1079,7 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
  */
 void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
 {
-       pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
+       pr_debug("%s(0x%px, %zu)\n", __func__, area, size);
 
        /*
         * A min_count = 2 is needed because vm_struct contains a reference to
@@ -1022,10 +1102,10 @@ EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
  */
 void __ref kmemleak_free(const void *ptr)
 {
-       pr_debug("%s(0x%p)\n", __func__, ptr);
+       pr_debug("%s(0x%px)\n", __func__, ptr);
 
        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
-               delete_object_full((unsigned long)ptr);
+               delete_object_full((unsigned long)ptr, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -1040,10 +1120,10 @@ EXPORT_SYMBOL_GPL(kmemleak_free);
  */
 void __ref kmemleak_free_part(const void *ptr, size_t size)
 {
-       pr_debug("%s(0x%p)\n", __func__, ptr);
+       pr_debug("%s(0x%px)\n", __func__, ptr);
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-               delete_object_part((unsigned long)ptr, size, false);
+               delete_object_part((unsigned long)ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -1056,14 +1136,10 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part);
  */
 void __ref kmemleak_free_percpu(const void __percpu *ptr)
 {
-       unsigned int cpu;
-
-       pr_debug("%s(0x%p)\n", __func__, ptr);
+       pr_debug("%s(0x%px)\n", __func__, ptr);
 
        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
-               for_each_possible_cpu(cpu)
-                       delete_object_full((unsigned long)per_cpu_ptr(ptr,
-                                                                     cpu));
+               delete_object_full((unsigned long)ptr, OBJECT_PERCPU);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 
@@ -1077,9 +1153,10 @@ EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 void __ref kmemleak_update_trace(const void *ptr)
 {
        struct kmemleak_object *object;
+       depot_stack_handle_t trace_handle;
        unsigned long flags;
 
-       pr_debug("%s(0x%p)\n", __func__, ptr);
+       pr_debug("%s(0x%px)\n", __func__, ptr);
 
        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;
@@ -1093,8 +1170,9 @@ void __ref kmemleak_update_trace(const void *ptr)
                return;
        }
 
+       trace_handle = set_track_prepare();
        raw_spin_lock_irqsave(&object->lock, flags);
-       object->trace_len = __save_stack_trace(object->trace);
+       object->trace_handle = trace_handle;
        raw_spin_unlock_irqrestore(&object->lock, flags);
 
        put_object(object);
@@ -1110,7 +1188,7 @@ EXPORT_SYMBOL(kmemleak_update_trace);
  */
 void __ref kmemleak_not_leak(const void *ptr)
 {
-       pr_debug("%s(0x%p)\n", __func__, ptr);
+       pr_debug("%s(0x%px)\n", __func__, ptr);
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
@@ -1128,10 +1206,10 @@ EXPORT_SYMBOL(kmemleak_not_leak);
  */
 void __ref kmemleak_ignore(const void *ptr)
 {
-       pr_debug("%s(0x%p)\n", __func__, ptr);
+       pr_debug("%s(0x%px)\n", __func__, ptr);
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-               make_black_object((unsigned long)ptr, false);
+               make_black_object((unsigned long)ptr, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
@@ -1148,7 +1226,7 @@ EXPORT_SYMBOL(kmemleak_ignore);
  */
 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
-       pr_debug("%s(0x%p)\n", __func__, ptr);
+       pr_debug("%s(0x%px)\n", __func__, ptr);
 
        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
@@ -1166,7 +1244,7 @@ EXPORT_SYMBOL(kmemleak_scan_area);
  */
 void __ref kmemleak_no_scan(const void *ptr)
 {
-       pr_debug("%s(0x%p)\n", __func__, ptr);
+       pr_debug("%s(0x%px)\n", __func__, ptr);
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
@@ -1182,7 +1260,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
  */
 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
 {
        pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);
 
        if (kmemleak_enabled)
                /*
@@ -1202,10 +1280,10 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
        pr_debug("%s(0x%pa)\n", __func__, &phys);
 
        if (kmemleak_enabled)
-               delete_object_part((unsigned long)phys, size, true);
+               delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
 }
 EXPORT_SYMBOL(kmemleak_free_part_phys);
 
@@ -1216,10 +1294,10 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
        pr_debug("%s(0x%pa)\n", __func__, &phys);
 
        if (kmemleak_enabled)
-               make_black_object((unsigned long)phys, true);
+               make_black_object((unsigned long)phys, OBJECT_PHYS);
 }
 EXPORT_SYMBOL(kmemleak_ignore_phys);
 
@@ -1230,7 +1308,7 @@ static bool update_checksum(struct kmemleak_object *object)
 {
        u32 old_csum = object->checksum;
 
-       if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+       if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
                return false;
 
        kasan_disable_current();
@@ -1386,7 +1464,6 @@ static void scan_object(struct kmemleak_object *object)
 {
        struct kmemleak_scan_area *area;
        unsigned long flags;
-       void *obj_ptr;
 
        /*
         * Once the object->lock is acquired, the corresponding memory block
@@ -1399,14 +1476,27 @@ static void scan_object(struct kmemleak_object *object)
                /* already freed object */
                goto out;
 
-       obj_ptr = object->flags & OBJECT_PHYS ?
-                 __va((phys_addr_t)object->pointer) :
-                 (void *)object->pointer;
+       if (object->flags & OBJECT_PERCPU) {
+               unsigned int cpu;
+
+               for_each_possible_cpu(cpu) {
+                       void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
+                       void *end = start + object->size;
 
-       if (hlist_empty(&object->area_list) ||
+                       scan_block(start, end, object);
+
+                       raw_spin_unlock_irqrestore(&object->lock, flags);
+                       cond_resched();
+                       raw_spin_lock_irqsave(&object->lock, flags);
+                       if (!(object->flags & OBJECT_ALLOCATED))
+                               break;
+               }
+       } else if (hlist_empty(&object->area_list) ||
            object->flags & OBJECT_FULL_SCAN) {
-               void *start = obj_ptr;
-               void *end = obj_ptr + object->size;
+               void *start = object->flags & OBJECT_PHYS ?
+                               __va((phys_addr_t)object->pointer) :
+                               (void *)object->pointer;
+               void *end = start + object->size;
                void *next;
 
                do {
@@ -1421,11 +1511,12 @@ static void scan_object(struct kmemleak_object *object)
                        cond_resched();
                        raw_spin_lock_irqsave(&object->lock, flags);
                } while (object->flags & OBJECT_ALLOCATED);
-       } else
+       } else {
                hlist_for_each_entry(area, &object->area_list, node)
                        scan_block((void *)area->start,
                                   (void *)(area->start + area->size),
                                   object);
+       }
 out:
        raw_spin_unlock_irqrestore(&object->lock, flags);
 }
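
Since the tracked pointer for a percpu object is the opaque __percpu cookie, the scanner above translates it into one concrete address range per CPU. A minimal standalone sketch of that translation (illustrative):

#include <linux/percpu.h>
#include <linux/printk.h>

/* Illustrative only: each CPU's copy occupies its own address range. */
static void demo_show_percpu_ranges(void __percpu *cookie, size_t size)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                void *start = per_cpu_ptr(cookie, cpu);

                pr_debug("cpu%u: [%px, %px)\n", cpu, start, start + size);
        }
}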
@@ -1463,6 +1554,35 @@ static void scan_gray_list(void)
        WARN_ON(!list_empty(&gray_list));
 }
 
+/*
+ * Conditionally call cond_resched() in an object iteration loop while making
+ * sure that the given object won't go away without the RCU read lock by
+ * performing a get_object() if necessary.
+ */
+static void kmemleak_cond_resched(struct kmemleak_object *object)
+{
+       if (!get_object(object))
+               return; /* Try next object */
+
+       raw_spin_lock_irq(&kmemleak_lock);
+       if (object->del_state & DELSTATE_REMOVED)
+               goto unlock_put;        /* Object removed */
+       object->del_state |= DELSTATE_NO_DELETE;
+       raw_spin_unlock_irq(&kmemleak_lock);
+
+       rcu_read_unlock();
+       cond_resched();
+       rcu_read_lock();
+
+       raw_spin_lock_irq(&kmemleak_lock);
+       if (object->del_state & DELSTATE_REMOVED)
+               list_del_rcu(&object->object_list);
+       object->del_state &= ~DELSTATE_NO_DELETE;
+unlock_put:
+       raw_spin_unlock_irq(&kmemleak_lock);
+       put_object(object);
+}
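
The del_state bits form a small handshake with __remove_object() so that an object can be freed while the scanner has dropped the RCU read lock; both sides touch del_state under kmemleak_lock. Roughly:

        scanner (kmemleak_cond_resched)       freer (__remove_object)
        -------------------------------       -----------------------------------
        del_state |= DELSTATE_NO_DELETE
        rcu_read_unlock(); cond_resched();
                                              rb_erase() from the object tree
                                              DELSTATE_NO_DELETE set, so skip
                                                list_del_rcu(&object->object_list)
                                              del_state |= DELSTATE_REMOVED
        rcu_read_lock();
        DELSTATE_REMOVED set, so do the
          deferred list_del_rcu() now
        del_state &= ~DELSTATE_NO_DELETE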
+
 /*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
@@ -1474,16 +1594,12 @@ static void kmemleak_scan(void)
        struct zone *zone;
        int __maybe_unused i;
        int new_leaks = 0;
-       int loop1_cnt = 0;
 
        jiffies_last_scan = jiffies;
 
        /* prepare the kmemleak_object's */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               bool obj_pinned = false;
-
-               loop1_cnt++;
                raw_spin_lock_irq(&object->lock);
 #ifdef DEBUG
                /*
@@ -1509,32 +1625,13 @@ static void kmemleak_scan(void)
 
                /* reset the reference count (whiten the object) */
                object->count = 0;
-               if (color_gray(object) && get_object(object)) {
+               if (color_gray(object) && get_object(object))
                        list_add_tail(&object->gray_list, &gray_list);
-                       obj_pinned = true;
-               }
 
                raw_spin_unlock_irq(&object->lock);
 
-               /*
-                * Do a cond_resched() to avoid soft lockup every 64k objects.
-                * Make sure a reference has been taken so that the object
-                * won't go away without RCU read lock.
-                */
-               if (!(loop1_cnt & 0xffff)) {
-                       if (!obj_pinned && !get_object(object)) {
-                               /* Try the next object instead */
-                               loop1_cnt--;
-                               continue;
-                       }
-
-                       rcu_read_unlock();
-                       cond_resched();
-                       rcu_read_lock();
-
-                       if (!obj_pinned)
-                               put_object(object);
-               }
+               if (need_resched())
+                       kmemleak_cond_resched(object);
        }
        rcu_read_unlock();
 
@@ -1557,6 +1654,9 @@ static void kmemleak_scan(void)
                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                        struct page *page = pfn_to_online_page(pfn);
 
+                       if (!(pfn & 63))
+                               cond_resched();
+
                        if (!page)
                                continue;
 
@@ -1567,8 +1667,6 @@ static void kmemleak_scan(void)
                        if (page_count(page) == 0)
                                continue;
                        scan_block(page, page + 1, NULL);
-                       if (!(pfn & 63))
-                               cond_resched();
                }
        }
        put_online_mems();
@@ -1602,6 +1700,9 @@ static void kmemleak_scan(void)
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
+               if (need_resched())
+                       kmemleak_cond_resched(object);
+
                /*
                 * This is racy but we can save the overhead of lock/unlock
                 * calls. The missed objects, if any, should be caught in
@@ -1636,6 +1737,9 @@ static void kmemleak_scan(void)
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
+               if (need_resched())
+                       kmemleak_cond_resched(object);
+
                /*
                 * This is racy but we can save the overhead of lock/unlock
                 * calls. The missed objects, if any, should be caught in
@@ -2024,7 +2128,7 @@ static void kmemleak_disable(void)
        kmemleak_enabled = 0;
 
        /* check whether it is too early for a kernel thread */
-       if (kmemleak_initialized)
+       if (kmemleak_late_initialized)
                schedule_work(&cleanup_work);
        else
                kmemleak_free_enabled = 0;
@@ -2041,8 +2145,10 @@ static int __init kmemleak_boot_config(char *str)
                return -EINVAL;
        if (strcmp(str, "off") == 0)
                kmemleak_disable();
-       else if (strcmp(str, "on") == 0)
+       else if (strcmp(str, "on") == 0) {
                kmemleak_skip_disable = 1;
+               stack_depot_request_early_init();
+       }
        else
                return -EINVAL;
        return 0;
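
This handler runs via the early parameter registration in this file (early_param("kmemleak", kmemleak_boot_config)), so requesting the depot here is what makes stack_depot_early_init(), called later from mm_core_init(), actually reserve the hash table when the kernel is booted with:

        kmemleak=on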
@@ -2087,7 +2193,7 @@ void __init kmemleak_init(void)
  */
 static int __init kmemleak_late_init(void)
 {
-       kmemleak_initialized = 1;
+       kmemleak_late_initialized = 1;
 
        debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
 
@@ -2095,7 +2201,7 @@ static int __init kmemleak_late_init(void)
                /*
                 * Some error occurred and kmemleak was disabled. There is a
                 * small chance that kmemleak_disable() was called immediately
-                * after setting kmemleak_initialized and we may end up with
+                * after setting kmemleak_late_initialized and we may end up with
                 * two clean-up threads but serialized by scan_mutex.
                 */
                schedule_work(&cleanup_work);