diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a182f5d..1eddc01 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
  * The following locks and mutexes are used by kmemleak:
  *
  * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
- *   accesses to the object_tree_root. The object_list is the main list
- *   holding the metadata (struct kmemleak_object) for the allocated memory
- *   blocks. The object_tree_root is a red black tree used to look-up
- *   metadata based on a pointer to the corresponding memory block.  The
- *   kmemleak_object structures are added to the object_list and
- *   object_tree_root in the create_object() function called from the
- *   kmemleak_alloc() callback and removed in delete_object() called from the
- *   kmemleak_free() callback
+ *   accesses to the object_tree_root (or object_phys_tree_root). The
+ *   object_list is the main list holding the metadata (struct kmemleak_object)
+ *   for the allocated memory blocks. The object_tree_root and
+ *   object_phys_tree_root are red black trees used to look up metadata based
+ *   on a pointer to the corresponding memory block. The object_phys_tree_root
+ *   tracks objects allocated with a physical address. The kmemleak_object
+ *   structures are added to the object_list and object_tree_root (or
+ *   object_phys_tree_root) in the create_object() function called from the
+ *   kmemleak_alloc() (or kmemleak_alloc_phys()) callback and removed in
+ *   delete_object() called from the kmemleak_free() callback.
  * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
  *   Accesses to the metadata (e.g. count) are protected by this lock. Note
  *   that some members of this structure may be protected by other means
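
A minimal sketch of the kmemleak_object fields that the locking rules above
refer to, reconstructed from their uses in this diff; field order is
approximate and the remaining members are omitted, so treat it as an
illustration rather than the full upstream definition:

	struct kmemleak_object {
		raw_spinlock_t lock;
		unsigned int flags;		/* OBJECT_* bits, incl. OBJECT_PHYS */
		struct list_head object_list;	/* entry in the global object_list */
		struct list_head gray_list;	/* scan worklist membership */
		struct rb_node rb_node;		/* node in object_tree_root or
						 * object_phys_tree_root */
		atomic_t use_count;		/* metadata refcount, freed via RCU */
		unsigned long pointer;		/* block start: virtual address, or
						 * physical when OBJECT_PHYS is set */
		size_t size;
		int min_count;			/* references expected; see kmemleak_alloc() */
		int count;			/* references found by the last scan */
		u32 checksum;			/* crc32 of the block contents */
	};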
@@ -172,6 +174,8 @@ struct kmemleak_object {
 #define OBJECT_NO_SCAN         (1 << 2)
 /* flag set to fully scan the object when scan_area allocation failed */
 #define OBJECT_FULL_SCAN       (1 << 3)
+/* flag set for object allocated with physical address */
+#define OBJECT_PHYS            (1 << 4)
 
 #define HEX_PREFIX             "    "
 /* number of bytes to print per line; must be 16 or 32 */
@@ -193,7 +197,9 @@ static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
 static LIST_HEAD(mem_pool_free_list);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
-/* protecting the access to object_list and object_tree_root */
+/* search tree for boundaries of objects with the OBJECT_PHYS flag */
+static struct rb_root object_phys_tree_root = RB_ROOT;
+/* protecting the access to object_list and object_tree_root (or object_phys_tree_root) */
 static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 
 /* allocation caches for kmemleak internal data */
@@ -285,6 +291,9 @@ static void hex_dump_object(struct seq_file *seq,
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;
 
+       if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+               return;
+
        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 
@@ -378,9 +387,11 @@ static void dump_object_info(struct kmemleak_object *object)
  * beginning of the memory block are allowed. The kmemleak_lock must be held
  * when calling this function.
  */
-static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
+static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
+                                              bool is_phys)
 {
-       struct rb_node *rb = object_tree_root.rb_node;
+       struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
+                            object_tree_root.rb_node;
        unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 
        while (rb) {
@@ -406,6 +417,12 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
        return NULL;
 }
 
+/* Look up a kmemleak object which was allocated with a virtual address. */
+static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
+{
+       return __lookup_object(ptr, alias, false);
+}
+
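
The hunk above elides the actual tree walk. Reconstructed from the surrounding
context, and identical for both trees since only the root differs, it compares
the KASAN-untagged pointer against each object's [pointer, pointer + size)
range; the exact warning path is a sketch:

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		unsigned long untagged_objp =
			(unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			/* ptr points into the middle of an object */
			kmemleak_warn("Found object by alias at 0x%08lx\n", ptr);
			dump_object_info(object);
			break;
		}
	}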
 /*
  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
  * that once an object's use_count reached 0, the RCU freeing was already
@@ -515,14 +532,15 @@ static void put_object(struct kmemleak_object *object)
 /*
  * Look up an object in the object search tree and increase its use_count.
  */
-static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
+static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
+                                                    bool is_phys)
 {
        unsigned long flags;
        struct kmemleak_object *object;
 
        rcu_read_lock();
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
-       object = lookup_object(ptr, alias);
+       object = __lookup_object(ptr, alias, is_phys);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
        /* check whether the object is still available */
@@ -533,28 +551,39 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
        return object;
 }
 
+/* Look up and get an object which was allocated with a virtual address. */
+static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
+{
+       return __find_and_get_object(ptr, alias, false);
+}
+
 /*
- * Remove an object from the object_tree_root and object_list. Must be called
- * with the kmemleak_lock held _if_ kmemleak is still enabled.
+ * Remove an object from the object_tree_root (or object_phys_tree_root)
+ * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
+ * is still enabled.
  */
 static void __remove_object(struct kmemleak_object *object)
 {
-       rb_erase(&object->rb_node, &object_tree_root);
+       rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
+                                  &object_phys_tree_root :
+                                  &object_tree_root);
        list_del_rcu(&object->object_list);
 }
 
 /*
  * Look up an object in the object search tree and remove it from both
- * object_tree_root and object_list. The returned object's use_count should be
- * at least 1, as initially set by create_object().
+ * object_tree_root (or object_phys_tree_root) and object_list. The
+ * returned object's use_count should be at least 1, as initially set
+ * by create_object().
  */
-static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
+static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
+                                                     bool is_phys)
 {
        unsigned long flags;
        struct kmemleak_object *object;
 
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
-       object = lookup_object(ptr, alias);
+       object = __lookup_object(ptr, alias, is_phys);
        if (object)
                __remove_object(object);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
@@ -572,10 +601,12 @@ static int __save_stack_trace(unsigned long *trace)
 
 /*
  * Create the metadata (struct kmemleak_object) corresponding to an allocated
- * memory block and add it to the object_list and object_tree_root.
+ * memory block and add it to the object_list and object_tree_root (or
+ * object_phys_tree_root).
  */
-static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
-                                            int min_count, gfp_t gfp)
+static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
+                                            int min_count, gfp_t gfp,
+                                            bool is_phys)
 {
        unsigned long flags;
        struct kmemleak_object *object, *parent;
@@ -595,7 +626,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        INIT_HLIST_HEAD(&object->area_list);
        raw_spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
-       object->flags = OBJECT_ALLOCATED;
+       object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
        object->pointer = ptr;
        object->size = kfence_ksize((void *)ptr) ?: size;
        object->excess_ref = 0;
@@ -628,9 +659,16 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
-       min_addr = min(min_addr, untagged_ptr);
-       max_addr = max(max_addr, untagged_ptr + size);
-       link = &object_tree_root.rb_node;
+       /*
+        * Only update min_addr and max_addr for objects
+        * storing a virtual address.
+        */
+       if (!is_phys) {
+               min_addr = min(min_addr, untagged_ptr);
+               max_addr = max(max_addr, untagged_ptr + size);
+       }
+       link = is_phys ? &object_phys_tree_root.rb_node :
+               &object_tree_root.rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
@@ -654,7 +692,8 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
-       rb_insert_color(&object->rb_node, &object_tree_root);
+       rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
+                                         &object_tree_root);
 
        list_add_tail_rcu(&object->object_list, &object_list);
 out:
@@ -662,6 +701,20 @@ out:
        return object;
 }
 
+/* Create a kmemleak object for a block allocated with a virtual address. */
+static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+                                            int min_count, gfp_t gfp)
+{
+       return __create_object(ptr, size, min_count, gfp, false);
+}
+
+/* Create a kmemleak object for a block allocated with a physical address. */
+static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
+                                            int min_count, gfp_t gfp)
+{
+       return __create_object(ptr, size, min_count, gfp, true);
+}
+
 /*
  * Mark the object as not allocated and schedule RCU freeing via put_object().
  */
@@ -690,7 +743,7 @@ static void delete_object_full(unsigned long ptr)
 {
        struct kmemleak_object *object;
 
-       object = find_and_remove_object(ptr, 0);
+       object = find_and_remove_object(ptr, 0, false);
        if (!object) {
 #ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
@@ -706,12 +759,12 @@ static void delete_object_full(unsigned long ptr)
  * delete it. If the memory block is partially freed, the function may create
  * additional metadata for the remaining parts of the block.
  */
-static void delete_object_part(unsigned long ptr, size_t size)
+static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
 {
        struct kmemleak_object *object;
        unsigned long start, end;
 
-       object = find_and_remove_object(ptr, 1);
+       object = find_and_remove_object(ptr, 1, is_phys);
        if (!object) {
 #ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
@@ -728,11 +781,11 @@ static void delete_object_part(unsigned long ptr, size_t size)
        start = object->pointer;
        end = object->pointer + object->size;
        if (ptr > start)
-               create_object(start, ptr - start, object->min_count,
-                             GFP_KERNEL);
+               __create_object(start, ptr - start, object->min_count,
+                             GFP_KERNEL, is_phys);
        if (ptr + size < end)
-               create_object(ptr + size, end - ptr - size, object->min_count,
-                             GFP_KERNEL);
+               __create_object(ptr + size, end - ptr - size, object->min_count,
+                             GFP_KERNEL, is_phys);
 
        __delete_object(object);
 }
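
A worked example of the splitting above, with made-up addresses: partially
freeing the middle of a tracked block deletes its metadata and re-creates
objects for the untouched head and tail, both inheriting the original
min_count:

	/* illustrative addresses only */
	kmemleak_alloc((void *)0x1000, 0x400, 1, GFP_KERNEL);	/* [0x1000, 0x1400) */
	kmemleak_free_part((void *)0x1100, 0x100);		/* free [0x1100, 0x1200) */
	/*
	 * delete_object_part() re-creates two objects:
	 *   [0x1000, 0x1100) via __create_object(start, ptr - start, ...)
	 *   [0x1200, 0x1400) via __create_object(ptr + size, end - ptr - size, ...)
	 */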
@@ -753,11 +806,11 @@ static void paint_it(struct kmemleak_object *object, int color)
        raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
-static void paint_ptr(unsigned long ptr, int color)
+static void paint_ptr(unsigned long ptr, int color, bool is_phys)
 {
        struct kmemleak_object *object;
 
-       object = find_and_get_object(ptr, 0);
+       object = __find_and_get_object(ptr, 0, is_phys);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
@@ -775,16 +828,16 @@ static void paint_ptr(unsigned long ptr, int color)
  */
 static void make_gray_object(unsigned long ptr)
 {
-       paint_ptr(ptr, KMEMLEAK_GREY);
+       paint_ptr(ptr, KMEMLEAK_GREY, false);
 }
 
 /*
  * Mark the object as black-colored so that it is ignored from scans and
  * reporting.
  */
-static void make_black_object(unsigned long ptr)
+static void make_black_object(unsigned long ptr, bool is_phys)
 {
-       paint_ptr(ptr, KMEMLEAK_BLACK);
+       paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
 }
 
 /*
@@ -990,7 +1043,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
        pr_debug("%s(0x%p)\n", __func__, ptr);
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-               delete_object_part((unsigned long)ptr, size);
+               delete_object_part((unsigned long)ptr, size, false);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -1078,7 +1131,7 @@ void __ref kmemleak_ignore(const void *ptr)
        pr_debug("%s(0x%p)\n", __func__, ptr);
 
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-               make_black_object((unsigned long)ptr);
+               make_black_object((unsigned long)ptr, false);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
@@ -1125,15 +1178,18 @@ EXPORT_SYMBOL(kmemleak_no_scan);
  *                      address argument
  * @phys:      physical address of the object
  * @size:      size of the object
- * @min_count: minimum number of references to this object.
- *              See kmemleak_alloc()
  * @gfp:       kmalloc() flags used for kmemleak internal memory allocations
  */
-void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
-                              gfp_t gfp)
+void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
 {
-       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
-               kmemleak_alloc(__va(phys), size, min_count, gfp);
+       pr_debug("%s(%pa, %zu)\n", __func__, &phys, size);
+
+       if (kmemleak_enabled)
+               /*
+                * Create the object with the OBJECT_PHYS flag and
+                * assume a min_count of 0.
+                */
+               create_object_phys((unsigned long)phys, size, 0, gfp);
 }
 EXPORT_SYMBOL(kmemleak_alloc_phys);
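
With min_count gone from the signature (it is now implicitly 0), callers lose
one argument. A hedged before/after sketch; the variable names are
illustrative of a caller such as memblock:

	/* before this change */
	kmemleak_alloc_phys(base, size, 0, 0);
	/* after: gfp immediately follows size */
	kmemleak_alloc_phys(base, size, 0);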
 
@@ -1146,22 +1202,12 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
-       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
-               kmemleak_free_part(__va(phys), size);
-}
-EXPORT_SYMBOL(kmemleak_free_part_phys);
+       pr_debug("%s(%pa)\n", __func__, &phys);
 
-/**
- * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
- *                         address argument
- * @phys:      physical address of the object
- */
-void __ref kmemleak_not_leak_phys(phys_addr_t phys)
-{
-       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
-               kmemleak_not_leak(__va(phys));
+       if (kmemleak_enabled)
+               delete_object_part((unsigned long)phys, size, true);
 }
-EXPORT_SYMBOL(kmemleak_not_leak_phys);
+EXPORT_SYMBOL(kmemleak_free_part_phys);
 
 /**
  * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
@@ -1170,8 +1216,10 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
-       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
-               kmemleak_ignore(__va(phys));
+       pr_debug("%s(%pa)\n", __func__, &phys);
+
+       if (kmemleak_enabled)
+               make_black_object((unsigned long)phys, true);
 }
 EXPORT_SYMBOL(kmemleak_ignore_phys);
 
@@ -1182,6 +1230,9 @@ static bool update_checksum(struct kmemleak_object *object)
 {
        u32 old_csum = object->checksum;
 
+       if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+               return false;
+
        kasan_disable_current();
        kcsan_disable_current();
        object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
@@ -1335,6 +1386,7 @@ static void scan_object(struct kmemleak_object *object)
 {
        struct kmemleak_scan_area *area;
        unsigned long flags;
+       void *obj_ptr;
 
        /*
         * Once the object->lock is acquired, the corresponding memory block
@@ -1346,10 +1398,15 @@ static void scan_object(struct kmemleak_object *object)
        if (!(object->flags & OBJECT_ALLOCATED))
                /* already freed object */
                goto out;
+
+       obj_ptr = object->flags & OBJECT_PHYS ?
+                 __va((phys_addr_t)object->pointer) :
+                 (void *)object->pointer;
+
        if (hlist_empty(&object->area_list) ||
            object->flags & OBJECT_FULL_SCAN) {
-               void *start = (void *)object->pointer;
-               void *end = (void *)(object->pointer + object->size);
+               void *start = obj_ptr;
+               void *end = obj_ptr + object->size;
                void *next;
 
                do {
@@ -1413,18 +1470,21 @@ static void scan_gray_list(void)
  */
 static void kmemleak_scan(void)
 {
-       unsigned long flags;
        struct kmemleak_object *object;
        struct zone *zone;
        int __maybe_unused i;
        int new_leaks = 0;
+       int loop1_cnt = 0;
 
        jiffies_last_scan = jiffies;
 
        /* prepare the kmemleak_object's */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               raw_spin_lock_irqsave(&object->lock, flags);
+               bool obj_pinned = false;
+
+               loop1_cnt++;
+               raw_spin_lock_irq(&object->lock);
 #ifdef DEBUG
                /*
                 * With a few exceptions there should be a maximum of
@@ -1436,12 +1496,45 @@ static void kmemleak_scan(void)
                        dump_object_info(object);
                }
 #endif
+
+               /* ignore objects outside lowmem (paint them black) */
+               if ((object->flags & OBJECT_PHYS) &&
+                  !(object->flags & OBJECT_NO_SCAN)) {
+                       unsigned long phys = object->pointer;
+
+                       if (PHYS_PFN(phys) < min_low_pfn ||
+                           PHYS_PFN(phys + object->size) >= max_low_pfn)
+                               __paint_it(object, KMEMLEAK_BLACK);
+               }
+
                /* reset the reference count (whiten the object) */
                object->count = 0;
-               if (color_gray(object) && get_object(object))
+               if (color_gray(object) && get_object(object)) {
                        list_add_tail(&object->gray_list, &gray_list);
+                       obj_pinned = true;
+               }
 
-               raw_spin_unlock_irqrestore(&object->lock, flags);
+               raw_spin_unlock_irq(&object->lock);
+
+               /*
+                * Every 64k objects, do a cond_resched() to avoid soft
+                * lockups. Make sure a reference has been taken so that
+                * the object won't go away while the RCU read lock is
+                * dropped.
+                */
+               if (!(loop1_cnt & 0xffff)) {
+                       if (!obj_pinned && !get_object(object)) {
+                               /* Try the next object instead */
+                               loop1_cnt--;
+                               continue;
+                       }
+
+                       rcu_read_unlock();
+                       cond_resched();
+                       rcu_read_lock();
+
+                       if (!obj_pinned)
+                               put_object(object);
+               }
        }
        rcu_read_unlock();
 
@@ -1509,14 +1602,21 @@ static void kmemleak_scan(void)
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               raw_spin_lock_irqsave(&object->lock, flags);
+               /*
+                * This is racy but we can save the overhead of lock/unlock
+                * calls. The missed objects, if any, should be caught in
+                * the next scan.
+                */
+               if (!color_white(object))
+                       continue;
+               raw_spin_lock_irq(&object->lock);
                if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
                    && update_checksum(object) && get_object(object)) {
                        /* color it gray temporarily */
                        object->count = object->min_count;
                        list_add_tail(&object->gray_list, &gray_list);
                }
-               raw_spin_unlock_irqrestore(&object->lock, flags);
+               raw_spin_unlock_irq(&object->lock);
        }
        rcu_read_unlock();
 
@@ -1536,7 +1636,14 @@ static void kmemleak_scan(void)
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               raw_spin_lock_irqsave(&object->lock, flags);
+               /*
+                * This is racy but we can save the overhead of lock/unlock
+                * calls. The missed objects, if any, should be caught in
+                * the next scan.
+                */
+               if (!color_white(object))
+                       continue;
+               raw_spin_lock_irq(&object->lock);
                if (unreferenced_object(object) &&
                    !(object->flags & OBJECT_REPORTED)) {
                        object->flags |= OBJECT_REPORTED;
@@ -1546,7 +1653,7 @@ static void kmemleak_scan(void)
 
                        new_leaks++;
                }
-               raw_spin_unlock_irqrestore(&object->lock, flags);
+               raw_spin_unlock_irq(&object->lock);
        }
        rcu_read_unlock();
 
@@ -1748,15 +1855,14 @@ static int dump_str_object_info(const char *str)
 static void kmemleak_clear(void)
 {
        struct kmemleak_object *object;
-       unsigned long flags;
 
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
-               raw_spin_lock_irqsave(&object->lock, flags);
+               raw_spin_lock_irq(&object->lock);
                if ((object->flags & OBJECT_REPORTED) &&
                    unreferenced_object(object))
                        __paint_it(object, KMEMLEAK_GREY);
-               raw_spin_unlock_irqrestore(&object->lock, flags);
+               raw_spin_unlock_irq(&object->lock);
        }
        rcu_read_unlock();