// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object trees
 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
 *   object_list is the main list holding the metadata (struct
 *   kmemleak_object) for the allocated memory blocks. The object trees are
 *   red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The kmemleak_object structures are added to
 *   the object_list and the object tree root in the create_object() function
 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback.
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer.
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */

#define BYTES_PER_POINTER       sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
                                           __GFP_NOLOCKDEP)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN)
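
/*
 * Illustrative example (not used by the code below): the mask keeps only the
 * caller's GFP_KERNEL/GFP_ATOMIC/__GFP_NOLOCKDEP bits and forces the
 * no-retry/no-warn behaviour for kmemleak's internal allocations, e.g.
 *
 *	gfp_kmemleak_mask(GFP_KERNEL | __GFP_ZERO)
 *		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * i.e. __GFP_ZERO is dropped rather than inherited.
 */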

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long start;
        size_t size;
};

#define KMEMLEAK_GREY   0
#define KMEMLEAK_BLACK  -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        raw_spinlock_t lock;
        unsigned int flags;             /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct rb_node rb_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned int del_state;         /* deletion state */
        unsigned long pointer;
        size_t size;
        /* pass surplus references to this pointer */
        unsigned long excess_ref;
        /* minimum number of pointers found before it is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* checksum for detecting modified objects */
        u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        depot_stack_handle_t trace_handle;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN        (1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS             (1 << 4)
/* flag set for per-CPU pointers */
#define OBJECT_PERCPU           (1 << 5)

/* set when __remove_object() called */
#define DELSTATE_REMOVED        (1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE      (1 << 1)

#define HEX_PREFIX              "    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE            16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE          1
/* include ASCII after the hex output */
#define HEX_ASCII               1
/* max number of lines to be printed */
#define HEX_MAX_LINES           2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protects the access to object_list and the object trees above */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)       do {    \
        if (seq)                                        \
                seq_printf(seq, fmt, ##__VA_ARGS__);    \
        else                                            \
                pr_warn(fmt, ##__VA_ARGS__);            \
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
                                 int rowsize, int groupsize, const void *buf,
                                 size_t len, bool ascii)
{
        if (seq)
                seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
                             buf, len, ascii);
        else
                print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
                               rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
{
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;

        if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
                return;

        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

        warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                             HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
        kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *              sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *              (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
        return object->count != KMEMLEAK_BLACK &&
                object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
        return object->min_count != KMEMLEAK_BLACK &&
                object->count >= object->min_count;
}
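
/*
 * Worked example (illustrative): with min_count == 1, an object for which a
 * scan found no pointers keeps count == 0, so color_white() is true and it
 * is a leak candidate; once a single reference is found (count == 1 >=
 * min_count), color_gray() is true. An object painted with min_count ==
 * KMEMLEAK_BLACK (-1) satisfies neither helper and is simply ignored.
 */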

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and they have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
}

/*
 * Printing of an unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(object->trace_handle, &entries);
        warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                          object->pointer, object->size);
        warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
                           object->comm, object->pid, object->jiffies);
        hex_dump_object(seq, object);
        warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);

        for (i = 0; i < nr_entries; i++) {
                void *ptr = (void *)entries[i];
                warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        pr_notice("Object 0x%08lx (size %zu):\n",
                        object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                        object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        if (object->trace_handle)
                stack_depot_print(object->trace_handle);
}

static struct rb_root *object_tree(unsigned long objflags)
{
        if (objflags & OBJECT_PHYS)
                return &object_phys_tree_root;
        if (objflags & OBJECT_PERCPU)
                return &object_percpu_tree_root;
        return &object_tree_root;
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
                                               unsigned int objflags)
{
        struct rb_node *rb = object_tree(objflags)->rb_node;
        unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

        while (rb) {
                struct kmemleak_object *object;
                unsigned long untagged_objp;

                object = rb_entry(rb, struct kmemleak_object, rb_node);
                untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

                if (untagged_ptr < untagged_objp)
                        rb = object->rb_node.rb_left;
                else if (untagged_objp + object->size <= untagged_ptr)
                        rb = object->rb_node.rb_right;
                else if (untagged_objp == untagged_ptr || alias)
                        return object;
                else {
                        kmemleak_warn("Found object by alias at 0x%08lx\n",
                                      ptr);
                        dump_object_info(object);
                        break;
                }
        }
        return NULL;
}
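
/*
 * Example of the look-up semantics (illustrative addresses): for an object
 * registered at 0x1000 with size 0x100, __lookup_object(0x1000, 0) and
 * __lookup_object(0x1080, 1) both return the object, while
 * __lookup_object(0x1080, 0) warns about an aliased pointer and returns NULL.
 */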

/* Look-up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        return __lookup_object(ptr, alias, 0);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;

        /* try the slab allocator first */
        if (object_cache) {
                object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
                if (object)
                        return object;
        }

        /* slab allocation failed, try the memory pool */
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = list_first_entry_or_null(&mem_pool_free_list,
                                          typeof(*object), object_list);
        if (object)
                list_del(&object->object_list);
        else if (mem_pool_free_count)
                object = &mem_pool[--mem_pool_free_count];
        else
                pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
        unsigned long flags;

        if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
                kmem_cache_free(object_cache, object);
                return;
        }

        /* add the object to the memory pool free list */
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        list_add(&object->object_list, &mem_pool_free_list);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        /*
         * It may be too early for the RCU callbacks, however, there is no
         * concurrent object_list traversal when !object_cache and all objects
         * came from the memory pool. Free the object directly.
         */
        if (object_cache)
                call_rcu(&object->rcu, free_object_rcu);
        else
                free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
                                                     unsigned int objflags)
{
        unsigned long flags;
        struct kmemleak_object *object;

        rcu_read_lock();
        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = __lookup_object(ptr, alias, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        return __find_and_get_object(ptr, alias, 0);
}

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
        rb_erase(&object->rb_node, object_tree(object->flags));
        if (!(object->del_state & DELSTATE_NO_DELETE))
                list_del_rcu(&object->object_list);
        object->del_state |= DELSTATE_REMOVED;
}

static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
                                                        int alias,
                                                        unsigned int objflags)
{
        struct kmemleak_object *object;

        object = __lookup_object(ptr, alias, objflags);
        if (object)
                __remove_object(object);

        return object;
}

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
                                                      unsigned int objflags)
{
        unsigned long flags;
        struct kmemleak_object *object;

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = __find_and_remove_object(ptr, alias, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
        depot_stack_handle_t trace_handle;
        unsigned long entries[MAX_TRACE];
        unsigned int nr_entries;

        /*
         * Use object_cache to determine whether kmemleak_init() has
         * been invoked. stack_depot_early_init() is called before
         * kmemleak_init() in mm_core_init().
         */
        if (!object_cache)
                return 0;
        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
        trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

        return trace_handle;
}

static struct kmemleak_object *__alloc_object(gfp_t gfp)
{
        struct kmemleak_object *object;

        object = mem_pool_alloc(gfp);
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
                return NULL;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        raw_spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->excess_ref = 0;
        object->count = 0;                      /* white color initially */
        object->checksum = 0;
        object->del_state = 0;

        /* task information */
        if (in_hardirq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
        } else if (in_serving_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the recorded task name is not correct.
                 */
                strncpy(object->comm, current->comm, sizeof(object->comm));
        }

        /* kernel backtrace */
        object->trace_handle = set_track_prepare();

        return object;
}

static int __link_object(struct kmemleak_object *object, unsigned long ptr,
                         size_t size, int min_count, unsigned int objflags)
{
        struct kmemleak_object *parent;
        struct rb_node **link, *rb_parent;
        unsigned long untagged_ptr;
        unsigned long untagged_objp;

        object->flags = OBJECT_ALLOCATED | objflags;
        object->pointer = ptr;
        object->size = kfence_ksize((void *)ptr) ?: size;
        object->min_count = min_count;
        object->jiffies = jiffies;

        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        /*
         * Only update min_addr and max_addr with object
         * storing virtual address.
         */
        if (!(objflags & (OBJECT_PHYS | OBJECT_PERCPU))) {
                min_addr = min(min_addr, untagged_ptr);
                max_addr = max(max_addr, untagged_ptr + size);
        }
        link = &object_tree(objflags)->rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
                untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
                if (untagged_ptr + size <= untagged_objp)
                        link = &parent->rb_node.rb_left;
                else if (untagged_objp + parent->size <= untagged_ptr)
                        link = &parent->rb_node.rb_right;
                else {
                        kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                                      ptr);
                        /*
                         * No need for parent->lock here since "parent" cannot
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
                        return -EEXIST;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
        rb_insert_color(&object->rb_node, object_tree(objflags));
        list_add_tail_rcu(&object->object_list, &object_list);

        return 0;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
                                int min_count, gfp_t gfp, unsigned int objflags)
{
        struct kmemleak_object *object;
        unsigned long flags;
        int ret;

        object = __alloc_object(gfp);
        if (!object)
                return;

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        ret = __link_object(object, ptr, size, min_count, objflags);
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
        if (ret)
                mem_pool_free(object);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
                          int min_count, gfp_t gfp)
{
        __create_object(ptr, size, min_count, gfp, 0);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
                               int min_count, gfp_t gfp)
{
        __create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
}

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
                                 int min_count, gfp_t gfp)
{
        __create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
        unsigned long flags;

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        raw_spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
        struct kmemleak_object *object;

        object = find_and_remove_object(ptr, 0, objflags);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                              ptr);
#endif
                return;
        }
        __delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
                               unsigned int objflags)
{
        struct kmemleak_object *object, *object_l, *object_r;
        unsigned long start, end, flags;

        object_l = __alloc_object(GFP_KERNEL);
        if (!object_l)
                return;

        object_r = __alloc_object(GFP_KERNEL);
        if (!object_r)
                goto out;

        raw_spin_lock_irqsave(&kmemleak_lock, flags);
        object = __find_and_remove_object(ptr, 1, objflags);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
#endif
                goto unlock;
        }

        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
         * this happens before kmemleak_init() is called.
         */
        start = object->pointer;
        end = object->pointer + object->size;
        if ((ptr > start) &&
            !__link_object(object_l, start, ptr - start,
                           object->min_count, objflags))
                object_l = NULL;
        if ((ptr + size < end) &&
            !__link_object(object_r, ptr + size, end - ptr - size,
                           object->min_count, objflags))
                object_r = NULL;

unlock:
        raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
        if (object)
                __delete_object(object);

out:
        if (object_l)
                mem_pool_free(object_l);
        if (object_r)
                mem_pool_free(object_r);
}
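
/*
 * Example of the split (illustrative addresses): partially freeing
 * [0x1100, 0x1200) from an object covering [0x1000, 0x1800) removes the
 * original object and re-registers the two remnants [0x1000, 0x1100) and
 * [0x1200, 0x1800) with the same min_count.
 */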

static void __paint_it(struct kmemleak_object *object, int color)
{
        object->min_count = color;
        if (color == KMEMLEAK_BLACK)
                object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
        raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
        struct kmemleak_object *object;

        object = __find_and_get_object(ptr, 0, objflags);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
                              (color == KMEMLEAK_GREY) ? "Grey" :
                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                return;
        }
        paint_it(object, color);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{
        paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area = NULL;
        unsigned long untagged_ptr;
        unsigned long untagged_objp;

        object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

        if (scan_area_cache)
                area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

        raw_spin_lock_irqsave(&object->lock, flags);
        if (!area) {
                pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
                /* mark the object for full scan to avoid false positives */
                object->flags |= OBJECT_FULL_SCAN;
                goto out_unlock;
        }
        if (size == SIZE_MAX) {
                size = untagged_objp + object->size - untagged_ptr;
        } else if (untagged_ptr + size > untagged_objp + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->start = ptr;
        area->size = size;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        raw_spin_lock_irqsave(&object->lock, flags);
        object->excess_ref = excess_ref;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
                return;
        }

        raw_spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        raw_spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:        pointer to beginning of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object. If during memory
 *              scanning a number of references less than @min_count is found,
 *              the object is reported as a memory leak. If @min_count is 0,
 *              the object is never reported as a leak. If @min_count is -1,
 *              the object is ignored (not scanned and not reported as a leak)
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
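
/*
 * Hedged usage sketch (hypothetical caller, not part of this file): a driver
 * with a private allocator invisible to kmemleak would pair the alloc/free
 * callbacks around its own operations. my_alloc()/my_free() are assumed
 * helpers:
 *
 *	void *buf = my_alloc(len);
 *	if (buf)
 *		kmemleak_alloc(buf, len, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(buf);
 *	my_free(buf);
 */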

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 * @size:       size of the object
 * @gfp:        flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
{
        pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object_percpu((unsigned long)ptr, size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:       pointer to vm_struct
 * @size:       size of the object
 * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%px, %zu)\n", __func__, area, size);

        /*
         * A min_count = 2 is needed because vm_struct contains a reference to
         * the virtual address of the vmalloc'ed block.
         */
        if (kmemleak_enabled) {
                create_object((unsigned long)area->addr, size, 2, gfp);
                object_set_excess_ref((unsigned long)area,
                                      (unsigned long)area->addr);
        }
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:        pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:        pointer to the beginning or inside the object. This also
 *              represents the start of the range to be freed
 * @size:       size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:        pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
        struct kmemleak_object *object;
        depot_stack_handle_t trace_handle;
        unsigned long flags;

        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;

        object = find_and_get_object((unsigned long)ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Updating stack trace for unknown object at %p\n",
                              ptr);
#endif
                return;
        }

        trace_handle = set_track_prepare();
        raw_spin_lock_irqsave(&object->lock, flags);
        object->trace_handle = trace_handle;
        raw_spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and to always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
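
/*
 * Illustrative difference between the two helpers above (hypothetical object
 * "obj"): kmemleak_not_leak(obj) keeps obj on the scan list but silences
 * leak reports for it (gray), while kmemleak_ignore(obj) additionally stops
 * scanning its contents (black), so pointers stored in obj no longer keep
 * other objects referenced.
 */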

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:        pointer to beginning or inside the object. This also
 *              represents the start of the scan area
 * @size:       size of the scan area
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
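
/*
 * Usage sketch (hypothetical structure): if only the `next` field of a large
 * buffer can legitimately hold references, restrict scanning to that field so
 * stale data in `data` cannot fabricate references:
 *
 *	struct big_buf {
 *		char data[4096];
 *		struct big_buf *next;
 *	};
 *	struct big_buf *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	kmemleak_scan_area(&b->next, sizeof(b->next), GFP_KERNEL);
 */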

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%px)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *                       address argument
 * @phys:       physical address of the object
 * @size:       size of the object
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);

        if (kmemleak_enabled)
                /*
                 * Create object with OBJECT_PHYS flag and
                 * assume min_count 0.
                 */
                create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
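
/*
 * Hedged sketch (hypothetical early-boot caller, assuming the range is not
 * already registered on the caller's behalf): a physical range reserved
 * behind the page allocator's back can still be tracked and scanned via its
 * physical address:
 *
 *	phys_addr_t p = memblock_phys_alloc(SZ_4K, SZ_4K);
 *	if (p)
 *		kmemleak_alloc_phys(p, SZ_4K, 0);
 */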

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *                           physical address argument
 * @phys:       physical address of the beginning of or inside an object. This
 *              also represents the start of the range to be freed
 * @size:       size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
        pr_debug("%s(0x%px)\n", __func__, &phys);

        if (kmemleak_enabled)
                delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *                        address argument
 * @phys:       physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
        pr_debug("%s(0x%px)\n", __func__, &phys);

        if (kmemleak_enabled)
                make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
        u32 old_csum = object->checksum;

        if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
                return false;

        kasan_disable_current();
        kcsan_disable_current();
        object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
        kasan_enable_current();
        kcsan_enable_current();

        return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
        if (!color_white(object)) {
                /* non-orphan, ignored or new */
                return;
        }

        /*
         * Increase the object's reference count (number of pointers to the
         * memory block). If this count reaches the required minimum, the
         * object's color will become gray and it will be added to the
         * gray_list.
         */
        object->count++;
        if (color_gray(object)) {
                /* put_object() called when removing from gray_list */
                WARN_ON(!get_object(object));
                list_add_tail(&object->gray_list, &gray_list);
        }
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
        if (!kmemleak_enabled)
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();

        return 0;
}
1367
1368 /*
1369  * Scan a memory block (exclusive range) for valid pointers and add those
1370  * found to the gray list.
1371  */
1372 static void scan_block(void *_start, void *_end,
1373                        struct kmemleak_object *scanned)
1374 {
1375         unsigned long *ptr;
1376         unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1377         unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1378         unsigned long flags;
1379         unsigned long untagged_ptr;
1380
1381         raw_spin_lock_irqsave(&kmemleak_lock, flags);
1382         for (ptr = start; ptr < end; ptr++) {
1383                 struct kmemleak_object *object;
1384                 unsigned long pointer;
1385                 unsigned long excess_ref;
1386
1387                 if (scan_should_stop())
1388                         break;
1389
1390                 kasan_disable_current();
1391                 pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1392                 kasan_enable_current();
1393
1394                 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1395                 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1396                         continue;
1397
1398                 /*
1399                  * No need for get_object() here since we hold kmemleak_lock.
1400                  * object->use_count cannot be dropped to 0 while the object
1401                  * is still present in object_tree_root and object_list
1402                  * (with updates protected by kmemleak_lock).
1403                  */
1404                 object = lookup_object(pointer, 1);
1405                 if (!object)
1406                         continue;
1407                 if (object == scanned)
1408                         /* self referenced, ignore */
1409                         continue;
1410
1411                 /*
1412                  * Avoid the lockdep recursive warning on object->lock being
1413                  * previously acquired in scan_object(). These locks are
1414                  * enclosed by scan_mutex.
1415                  */
1416                 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1417                 /* only pass surplus references (object already gray) */
1418                 if (color_gray(object)) {
1419                         excess_ref = object->excess_ref;
1420                         /* no need for update_refs() if object already gray */
1421                 } else {
1422                         excess_ref = 0;
1423                         update_refs(object);
1424                 }
1425                 raw_spin_unlock(&object->lock);
1426
1427                 if (excess_ref) {
1428                         object = lookup_object(excess_ref, 0);
1429                         if (!object)
1430                                 continue;
1431                         if (object == scanned)
1432                                 /* circular reference, ignore */
1433                                 continue;
1434                         raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1435                         update_refs(object);
1436                         raw_spin_unlock(&object->lock);
1437                 }
1438         }
1439         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1440 }
1441
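/*
 * A note on the excess_ref handling above: an object may designate one
 * extra address (object->excess_ref) to be treated as referenced once the
 * object itself turns gray. This is used, for instance, by the vmalloc()
 * tracking elsewhere in this file, where the vm_struct may hold the only
 * pointer to the vmalloc'ed block.
 */
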
1442 /*
1443  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1444  */
1445 #ifdef CONFIG_SMP
1446 static void scan_large_block(void *start, void *end)
1447 {
1448         void *next;
1449
1450         while (start < end) {
1451                 next = min(start + MAX_SCAN_SIZE, end);
1452                 scan_block(start, next, NULL);
1453                 start = next;
1454                 cond_resched();
1455         }
1456 }
1457 #endif
1458
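/*
 * In this file, scan_large_block() is only used for the per-cpu sections
 * in kmemleak_scan(), hence the CONFIG_SMP guard above.
 */
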
1459 /*
1460  * Scan a memory block corresponding to a kmemleak_object. The caller must
1461  * ensure that object->use_count >= 1.
1462  */
1463 static void scan_object(struct kmemleak_object *object)
1464 {
1465         struct kmemleak_scan_area *area;
1466         unsigned long flags;
1467
1468         /*
1469          * Once the object->lock is acquired, the corresponding memory block
1470          * cannot be freed (the same lock is acquired in delete_object).
1471          */
1472         raw_spin_lock_irqsave(&object->lock, flags);
1473         if (object->flags & OBJECT_NO_SCAN)
1474                 goto out;
1475         if (!(object->flags & OBJECT_ALLOCATED))
1476                 /* already freed object */
1477                 goto out;
1478
1479         if (object->flags & OBJECT_PERCPU) {
1480                 unsigned int cpu;
1481
1482                 for_each_possible_cpu(cpu) {
1483                         void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1484                         void *end = start + object->size;
1485
1486                         scan_block(start, end, object);
1487
1488                         raw_spin_unlock_irqrestore(&object->lock, flags);
1489                         cond_resched();
1490                         raw_spin_lock_irqsave(&object->lock, flags);
1491                         if (!(object->flags & OBJECT_ALLOCATED))
1492                                 break;
1493                 }
1494         } else if (hlist_empty(&object->area_list) ||
1495             object->flags & OBJECT_FULL_SCAN) {
1496                 void *start = object->flags & OBJECT_PHYS ?
1497                                 __va((phys_addr_t)object->pointer) :
1498                                 (void *)object->pointer;
1499                 void *end = start + object->size;
1500                 void *next;
1501
1502                 do {
1503                         next = min(start + MAX_SCAN_SIZE, end);
1504                         scan_block(start, next, object);
1505
1506                         start = next;
1507                         if (start >= end)
1508                                 break;
1509
1510                         raw_spin_unlock_irqrestore(&object->lock, flags);
1511                         cond_resched();
1512                         raw_spin_lock_irqsave(&object->lock, flags);
1513                 } while (object->flags & OBJECT_ALLOCATED);
1514         } else {
1515                 hlist_for_each_entry(area, &object->area_list, node)
1516                         scan_block((void *)area->start,
1517                                    (void *)(area->start + area->size),
1518                                    object);
1519         }
1520 out:
1521         raw_spin_unlock_irqrestore(&object->lock, flags);
1522 }
1523
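/*
 * A note on the chunked loops above: object->lock is dropped around
 * cond_resched() between MAX_SCAN_SIZE chunks (and between CPUs for
 * per-cpu objects) so that freeing paths are not blocked for too long;
 * re-checking OBJECT_ALLOCATED after re-acquiring the lock is what makes
 * dropping it safe.
 */
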
1524 /*
1525  * Scan the objects already referenced (gray objects). More objects become
1526  * referenced as it proceeds; if there are no leaks, all objects get scanned.
1527  */
1528 static void scan_gray_list(void)
1529 {
1530         struct kmemleak_object *object, *tmp;
1531
1532         /*
1533          * The list traversal is safe for both tail additions and removals
1534          * from inside the loop. The kmemleak objects cannot be freed from
1535          * outside the loop because their use_count was incremented.
1536          */
1537         object = list_entry(gray_list.next, typeof(*object), gray_list);
1538         while (&object->gray_list != &gray_list) {
1539                 cond_resched();
1540
1541                 /* may add new objects to the list */
1542                 if (!scan_should_stop())
1543                         scan_object(object);
1544
1545                 tmp = list_entry(object->gray_list.next, typeof(*object),
1546                                  gray_list);
1547
1548                 /* remove the object from the list and release it */
1549                 list_del(&object->gray_list);
1550                 put_object(object);
1551
1552                 object = tmp;
1553         }
1554         WARN_ON(!list_empty(&gray_list));
1555 }
1556
1557 /*
1558  * Conditionally call cond_resched() in an object iteration loop while making
1559  * sure that the given object won't go away without the RCU read lock, by
1560  * performing a get_object() if necessary.
1561  */
1562 static void kmemleak_cond_resched(struct kmemleak_object *object)
1563 {
1564         if (!get_object(object))
1565                 return; /* Try next object */
1566
1567         raw_spin_lock_irq(&kmemleak_lock);
1568         if (object->del_state & DELSTATE_REMOVED)
1569                 goto unlock_put;        /* Object removed */
1570         object->del_state |= DELSTATE_NO_DELETE;
1571         raw_spin_unlock_irq(&kmemleak_lock);
1572
1573         rcu_read_unlock();
1574         cond_resched();
1575         rcu_read_lock();
1576
1577         raw_spin_lock_irq(&kmemleak_lock);
1578         if (object->del_state & DELSTATE_REMOVED)
1579                 list_del_rcu(&object->object_list);
1580         object->del_state &= ~DELSTATE_NO_DELETE;
1581 unlock_put:
1582         raw_spin_unlock_irq(&kmemleak_lock);
1583         put_object(object);
1584 }
1585
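/*
 * A sketch of the del_state handshake above: while DELSTATE_NO_DELETE is
 * set, a racing deletion is expected to leave the object on the
 * object_list and only mark it DELSTATE_REMOVED; the list removal is then
 * completed here, after the RCU read lock has been re-acquired.
 */
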
1586 /*
1587  * Scan data sections and all the referenced memory blocks allocated via the
1588  * kernel's standard allocators. This function must be called with the
1589  * scan_mutex held.
1590  */
1591 static void kmemleak_scan(void)
1592 {
1593         struct kmemleak_object *object;
1594         struct zone *zone;
1595         int __maybe_unused i;
1596         int new_leaks = 0;
1597
1598         jiffies_last_scan = jiffies;
1599
1600         /* prepare the kmemleak_object structures */
1601         rcu_read_lock();
1602         list_for_each_entry_rcu(object, &object_list, object_list) {
1603                 raw_spin_lock_irq(&object->lock);
1604 #ifdef DEBUG
1605                 /*
1606                  * With a few exceptions there should be a maximum of
1607                  * 1 reference to any object at this point.
1608                  */
1609                 if (atomic_read(&object->use_count) > 1) {
1610                         pr_debug("object->use_count = %d\n",
1611                                  atomic_read(&object->use_count));
1612                         dump_object_info(object);
1613                 }
1614 #endif
1615
1616                 /* ignore objects outside lowmem (paint them black) */
1617                 if ((object->flags & OBJECT_PHYS) &&
1618                    !(object->flags & OBJECT_NO_SCAN)) {
1619                         unsigned long phys = object->pointer;
1620
1621                         if (PHYS_PFN(phys) < min_low_pfn ||
1622                             PHYS_PFN(phys + object->size) >= max_low_pfn)
1623                                 __paint_it(object, KMEMLEAK_BLACK);
1624                 }
1625
1626                 /* reset the reference count (whiten the object) */
1627                 object->count = 0;
1628                 if (color_gray(object) && get_object(object))
1629                         list_add_tail(&object->gray_list, &gray_list);
1630
1631                 raw_spin_unlock_irq(&object->lock);
1632
1633                 if (need_resched())
1634                         kmemleak_cond_resched(object);
1635         }
1636         rcu_read_unlock();
1637
1638 #ifdef CONFIG_SMP
1639         /* per-cpu sections scanning */
1640         for_each_possible_cpu(i)
1641                 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1642                                  __per_cpu_end + per_cpu_offset(i));
1643 #endif
1644
1645         /*
1646          * Struct page scanning for each node.
1647          */
1648         get_online_mems();
1649         for_each_populated_zone(zone) {
1650                 unsigned long start_pfn = zone->zone_start_pfn;
1651                 unsigned long end_pfn = zone_end_pfn(zone);
1652                 unsigned long pfn;
1653
1654                 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1655                         struct page *page = pfn_to_online_page(pfn);
1656
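                        /* take a breather roughly once every 64 pages */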
1657                         if (!(pfn & 63))
1658                                 cond_resched();
1659
1660                         if (!page)
1661                                 continue;
1662
1663                         /* only scan pages belonging to this zone */
1664                         if (page_zone(page) != zone)
1665                                 continue;
1666                         /* only scan if page is in use */
1667                         if (page_count(page) == 0)
1668                                 continue;
1669                         scan_block(page, page + 1, NULL);
1670                 }
1671         }
1672         put_online_mems();
1673
1674         /*
1675          * Scanning the task stacks (may introduce false negatives).
1676          */
1677         if (kmemleak_stack_scan) {
1678                 struct task_struct *p, *g;
1679
1680                 rcu_read_lock();
1681                 for_each_process_thread(g, p) {
1682                         void *stack = try_get_task_stack(p);
1683                         if (stack) {
1684                                 scan_block(stack, stack + THREAD_SIZE, NULL);
1685                                 put_task_stack(p);
1686                         }
1687                 }
1688                 rcu_read_unlock();
1689         }
1690
1691         /*
1692          * Scan the objects already referenced from the sections scanned
1693          * above.
1694          */
1695         scan_gray_list();
1696
1697         /*
1698          * Check for new or unreferenced objects modified since the previous
1699          * scan and color them gray until the next scan.
1700          */
1701         rcu_read_lock();
1702         list_for_each_entry_rcu(object, &object_list, object_list) {
1703                 if (need_resched())
1704                         kmemleak_cond_resched(object);
1705
1706                 /*
1707                  * This is racy but we can save the overhead of lock/unlock
1708                  * calls. The missed objects, if any, should be caught in
1709                  * the next scan.
1710                  */
1711                 if (!color_white(object))
1712                         continue;
1713                 raw_spin_lock_irq(&object->lock);
1714                 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1715                     && update_checksum(object) && get_object(object)) {
1716                         /* color it gray temporarily */
1717                         object->count = object->min_count;
1718                         list_add_tail(&object->gray_list, &gray_list);
1719                 }
1720                 raw_spin_unlock_irq(&object->lock);
1721         }
1722         rcu_read_unlock();
1723
1724         /*
1725          * Re-scan the gray list for modified unreferenced objects.
1726          */
1727         scan_gray_list();
1728
1729         /*
1730          * If scanning was stopped do not report any new unreferenced objects.
1731          */
1732         if (scan_should_stop())
1733                 return;
1734
1735         /*
1736          * Scanning result reporting.
1737          */
1738         rcu_read_lock();
1739         list_for_each_entry_rcu(object, &object_list, object_list) {
1740                 if (need_resched())
1741                         kmemleak_cond_resched(object);
1742
1743                 /*
1744                  * This is racy but we can save the overhead of lock/unlock
1745                  * calls. The missed objects, if any, should be caught in
1746                  * the next scan.
1747                  */
1748                 if (!color_white(object))
1749                         continue;
1750                 raw_spin_lock_irq(&object->lock);
1751                 if (unreferenced_object(object) &&
1752                     !(object->flags & OBJECT_REPORTED)) {
1753                         object->flags |= OBJECT_REPORTED;
1754
1755                         if (kmemleak_verbose)
1756                                 print_unreferenced(NULL, object);
1757
1758                         new_leaks++;
1759                 }
1760                 raw_spin_unlock_irq(&object->lock);
1761         }
1762         rcu_read_unlock();
1763
1764         if (new_leaks) {
1765                 kmemleak_found_leaks = true;
1766
1767                 pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1768                         new_leaks);
1769         }
1770
1771 }
1772
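/*
 * To summarize, one kmemleak_scan() pass: (1) whitens every object and
 * seeds the gray_list with the objects that are gray by definition
 * (min_count == 0, e.g. explicitly painted grey), (2) scans the data/bss
 * sections, the per-cpu areas, the struct pages and, optionally, the task
 * stacks, (3) drains the gray_list transitively, (4) re-queues the white
 * objects whose checksum changed since the last pass and drains the list
 * again, and (5) reports whatever is still white as a suspected leak.
 */
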
1773 /*
1774  * Thread function performing automatic memory scanning. Unreferenced objects
1775  * at the end of a memory scan are reported but only the first time.
1776  */
1777 static int kmemleak_scan_thread(void *arg)
1778 {
1779         static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1780
1781         pr_info("Automatic memory scanning thread started\n");
1782         set_user_nice(current, 10);
1783
1784         /*
1785          * Wait before the first scan to allow the system to fully initialize.
1786          */
1787         if (first_run) {
1788                 signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1789                 first_run = 0;
1790                 while (timeout && !kthread_should_stop())
1791                         timeout = schedule_timeout_interruptible(timeout);
1792         }
1793
1794         while (!kthread_should_stop()) {
1795                 signed long timeout = READ_ONCE(jiffies_scan_wait);
1796
1797                 mutex_lock(&scan_mutex);
1798                 kmemleak_scan();
1799                 mutex_unlock(&scan_mutex);
1800
1801                 /* wait before the next scan */
1802                 while (timeout && !kthread_should_stop())
1803                         timeout = schedule_timeout_interruptible(timeout);
1804         }
1805
1806         pr_info("Automatic memory scanning thread ended\n");
1807
1808         return 0;
1809 }
1810
1811 /*
1812  * Start the automatic memory scanning thread. This function must be called
1813  * with the scan_mutex held.
1814  */
1815 static void start_scan_thread(void)
1816 {
1817         if (scan_thread)
1818                 return;
1819         scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1820         if (IS_ERR(scan_thread)) {
1821                 pr_warn("Failed to create the scan thread\n");
1822                 scan_thread = NULL;
1823         }
1824 }
1825
1826 /*
1827  * Stop the automatic memory scanning thread.
1828  */
1829 static void stop_scan_thread(void)
1830 {
1831         if (scan_thread) {
1832                 kthread_stop(scan_thread);
1833                 scan_thread = NULL;
1834         }
1835 }
1836
1837 /*
1838  * Iterate over the object_list and return the first valid object at or after
1839  * the required position with its use_count incremented. The scan_mutex is
1840  * acquired (interruptibly) to serialize against the memory scanner.
1841  */
1842 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1843 {
1844         struct kmemleak_object *object;
1845         loff_t n = *pos;
1846         int err;
1847
1848         err = mutex_lock_interruptible(&scan_mutex);
1849         if (err < 0)
1850                 return ERR_PTR(err);
1851
1852         rcu_read_lock();
1853         list_for_each_entry_rcu(object, &object_list, object_list) {
1854                 if (n-- > 0)
1855                         continue;
1856                 if (get_object(object))
1857                         goto out;
1858         }
1859         object = NULL;
1860 out:
1861         return object;
1862 }
1863
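/*
 * Note that the RCU read lock taken in kmemleak_seq_start() is
 * deliberately held across the whole seq_file iteration and only dropped
 * in kmemleak_seq_stop() below.
 */
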
1864 /*
1865  * Return the next object in the object_list. The function decrements the
1866  * use_count of the previous object and increases that of the next one.
1867  */
1868 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1869 {
1870         struct kmemleak_object *prev_obj = v;
1871         struct kmemleak_object *next_obj = NULL;
1872         struct kmemleak_object *obj = prev_obj;
1873
1874         ++(*pos);
1875
1876         list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1877                 if (get_object(obj)) {
1878                         next_obj = obj;
1879                         break;
1880                 }
1881         }
1882
1883         put_object(prev_obj);
1884         return next_obj;
1885 }
1886
1887 /*
1888  * Decrement the use_count of the last object returned, if any.
1889  */
1890 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1891 {
1892         if (!IS_ERR(v)) {
1893                 /*
1894                  * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1895                  * waiting was interrupted, so only release it if !IS_ERR.
1896                  */
1897                 rcu_read_unlock();
1898                 mutex_unlock(&scan_mutex);
1899                 if (v)
1900                         put_object(v);
1901         }
1902 }
1903
1904 /*
1905  * Print the information for an unreferenced object to the seq file.
1906  */
1907 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1908 {
1909         struct kmemleak_object *object = v;
1910         unsigned long flags;
1911
1912         raw_spin_lock_irqsave(&object->lock, flags);
1913         if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1914                 print_unreferenced(seq, object);
1915         raw_spin_unlock_irqrestore(&object->lock, flags);
1916         return 0;
1917 }
1918
1919 static const struct seq_operations kmemleak_seq_ops = {
1920         .start = kmemleak_seq_start,
1921         .next  = kmemleak_seq_next,
1922         .stop  = kmemleak_seq_stop,
1923         .show  = kmemleak_seq_show,
1924 };
1925
1926 static int kmemleak_open(struct inode *inode, struct file *file)
1927 {
1928         return seq_open(file, &kmemleak_seq_ops);
1929 }
1930
1931 static int dump_str_object_info(const char *str)
1932 {
1933         unsigned long flags;
1934         struct kmemleak_object *object;
1935         unsigned long addr;
1936
1937         if (kstrtoul(str, 0, &addr))
1938                 return -EINVAL;
1939         object = find_and_get_object(addr, 0);
1940         if (!object) {
1941                 pr_info("Unknown object at 0x%08lx\n", addr);
1942                 return -EINVAL;
1943         }
1944
1945         raw_spin_lock_irqsave(&object->lock, flags);
1946         dump_object_info(object);
1947         raw_spin_unlock_irqrestore(&object->lock, flags);
1948
1949         put_object(object);
1950         return 0;
1951 }
1952
1953 /*
1954  * We use grey instead of black to ensure we can do future scans on the same
1955  * objects. If they were painted black and no longer scanned, they could
1956  * later hold the only references to newly allocated objects, and those
1957  * objects would then be reported as false positives.
1958  */
1959 static void kmemleak_clear(void)
1960 {
1961         struct kmemleak_object *object;
1962
1963         rcu_read_lock();
1964         list_for_each_entry_rcu(object, &object_list, object_list) {
1965                 raw_spin_lock_irq(&object->lock);
1966                 if ((object->flags & OBJECT_REPORTED) &&
1967                     unreferenced_object(object))
1968                         __paint_it(object, KMEMLEAK_GREY);
1969                 raw_spin_unlock_irq(&object->lock);
1970         }
1971         rcu_read_unlock();
1972
1973         kmemleak_found_leaks = false;
1974 }
1975
1976 static void __kmemleak_do_cleanup(void);
1977
1978 /*
1979  * File write operation to configure kmemleak at run-time. The following
1980  * commands can be written to the /sys/kernel/debug/kmemleak file:
1981  *   off        - disable kmemleak (irreversible)
1982  *   stack=on   - enable the task stack scanning
1983  *   stack=off  - disable the task stack scanning
1984  *   scan=on    - start the automatic memory scanning thread
1985  *   scan=off   - stop the automatic memory scanning thread
1986  *   scan=...   - set the automatic memory scanning period in seconds (0 to
1987  *                disable it)
1988  *   scan       - trigger a memory scan
1989  *   clear      - mark all current reported unreferenced kmemleak objects as
1990  *                grey to ignore printing them, or free all kmemleak objects
1991  *                if kmemleak has been disabled.
1992  *   dump=...   - dump information about the object found at the given address
1993  */
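/*
 * For example, from a shell (a usage sketch; debugfs assumed mounted at
 * /sys/kernel/debug):
 *
 *   echo scan > /sys/kernel/debug/kmemleak        # scan now
 *   echo scan=600 > /sys/kernel/debug/kmemleak    # re-scan every 600 secs
 *   echo clear > /sys/kernel/debug/kmemleak       # suppress current reports
 *   echo dump=0x... > /sys/kernel/debug/kmemleak  # inspect one object
 */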
1994 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1995                               size_t size, loff_t *ppos)
1996 {
1997         char buf[64];
1998         int buf_size;
1999         int ret;
2000
2001         buf_size = min(size, (sizeof(buf) - 1));
2002         if (strncpy_from_user(buf, user_buf, buf_size) < 0)
2003                 return -EFAULT;
2004         buf[buf_size] = 0;
2005
2006         ret = mutex_lock_interruptible(&scan_mutex);
2007         if (ret < 0)
2008                 return ret;
2009
2010         if (strncmp(buf, "clear", 5) == 0) {
2011                 if (kmemleak_enabled)
2012                         kmemleak_clear();
2013                 else
2014                         __kmemleak_do_cleanup();
2015                 goto out;
2016         }
2017
2018         if (!kmemleak_enabled) {
2019                 ret = -EPERM;
2020                 goto out;
2021         }
2022
2023         if (strncmp(buf, "off", 3) == 0)
2024                 kmemleak_disable();
2025         else if (strncmp(buf, "stack=on", 8) == 0)
2026                 kmemleak_stack_scan = 1;
2027         else if (strncmp(buf, "stack=off", 9) == 0)
2028                 kmemleak_stack_scan = 0;
2029         else if (strncmp(buf, "scan=on", 7) == 0)
2030                 start_scan_thread();
2031         else if (strncmp(buf, "scan=off", 8) == 0)
2032                 stop_scan_thread();
2033         else if (strncmp(buf, "scan=", 5) == 0) {
2034                 unsigned secs;
2035                 unsigned long msecs;
2036
2037                 ret = kstrtouint(buf + 5, 0, &secs);
2038                 if (ret < 0)
2039                         goto out;
2040
2041                 msecs = secs * MSEC_PER_SEC;
2042                 if (msecs > UINT_MAX)
2043                         msecs = UINT_MAX;
2044
2045                 stop_scan_thread();
2046                 if (msecs) {
2047                         WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
2048                         start_scan_thread();
2049                 }
2050         } else if (strncmp(buf, "scan", 4) == 0)
2051                 kmemleak_scan();
2052         else if (strncmp(buf, "dump=", 5) == 0)
2053                 ret = dump_str_object_info(buf + 5);
2054         else
2055                 ret = -EINVAL;
2056
2057 out:
2058         mutex_unlock(&scan_mutex);
2059         if (ret < 0)
2060                 return ret;
2061
2062         /* ignore the rest of the buffer, only one command at a time */
2063         *ppos += size;
2064         return size;
2065 }
2066
2067 static const struct file_operations kmemleak_fops = {
2068         .owner          = THIS_MODULE,
2069         .open           = kmemleak_open,
2070         .read           = seq_read,
2071         .write          = kmemleak_write,
2072         .llseek         = seq_lseek,
2073         .release        = seq_release,
2074 };
2075
2076 static void __kmemleak_do_cleanup(void)
2077 {
2078         struct kmemleak_object *object, *tmp;
2079
2080         /*
2081          * Kmemleak has already been disabled, no need for RCU list traversal
2082          * or kmemleak_lock held.
2083          */
2084         list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2085                 __remove_object(object);
2086                 __delete_object(object);
2087         }
2088 }
2089
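/*
 * __kmemleak_do_cleanup() runs either from the "clear" command once
 * kmemleak has been disabled, or from the cleanup work below when no
 * leaks were found.
 */
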
2090 /*
2091  * Stop the memory scanning thread and free the kmemleak internal objects if
2092  * no memory leaks were found (otherwise, kmemleak may still have some useful
2093  * information on memory leaks).
2094  */
2095 static void kmemleak_do_cleanup(struct work_struct *work)
2096 {
2097         stop_scan_thread();
2098
2099         mutex_lock(&scan_mutex);
2100         /*
2101          * Once the scan thread is guaranteed to have stopped, it is safe to no
2102          * longer track object freeing. Ordering of the scan thread stopping and
2103          * the memory accesses below is guaranteed by the kthread_stop()
2104          * function.
2105          */
2106         kmemleak_free_enabled = 0;
2107         mutex_unlock(&scan_mutex);
2108
2109         if (!kmemleak_found_leaks)
2110                 __kmemleak_do_cleanup();
2111         else
2112                 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2113 }
2114
2115 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2116
2117 /*
2118  * Disable kmemleak. No memory allocation/freeing will be traced once this
2119  * function is called. Disabling kmemleak is an irreversible operation.
2120  */
2121 static void kmemleak_disable(void)
2122 {
2123         /* atomically check whether it was already invoked */
2124         if (cmpxchg(&kmemleak_error, 0, 1))
2125                 return;
2126
2127         /* stop any memory operation tracing */
2128         kmemleak_enabled = 0;
2129
2130         /* check whether it is too early for a kernel thread */
2131         if (kmemleak_late_initialized)
2132                 schedule_work(&cleanup_work);
2133         else
2134                 kmemleak_free_enabled = 0;
2135
2136         pr_info("Kernel memory leak detector disabled\n");
2137 }
2138
2139 /*
2140  * Allow boot-time kmemleak disabling (the detector is enabled by default).
2141  */
2142 static int __init kmemleak_boot_config(char *str)
2143 {
2144         if (!str)
2145                 return -EINVAL;
2146         if (strcmp(str, "off") == 0)
2147                 kmemleak_disable();
2148         else if (strcmp(str, "on") == 0) {
2149                 kmemleak_skip_disable = 1;
2150                 stack_depot_request_early_init();
2151         } else
2153                 return -EINVAL;
2154         return 0;
2155 }
2156 early_param("kmemleak", kmemleak_boot_config);
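
/*
 * For example, booting with "kmemleak=off" on the kernel command line
 * disables the detector before any tracing starts, while "kmemleak=on"
 * makes kmemleak_init() below skip the CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 * disabling.
 */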
2157
2158 /*
2159  * Kmemleak initialization.
2160  */
2161 void __init kmemleak_init(void)
2162 {
2163 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2164         if (!kmemleak_skip_disable) {
2165                 kmemleak_disable();
2166                 return;
2167         }
2168 #endif
2169
2170         if (kmemleak_error)
2171                 return;
2172
2173         jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2174         jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2175
2176         object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2177         scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2178
2179         /* register the data/bss sections */
2180         create_object((unsigned long)_sdata, _edata - _sdata,
2181                       KMEMLEAK_GREY, GFP_ATOMIC);
2182         create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2183                       KMEMLEAK_GREY, GFP_ATOMIC);
2184         /* only register .data..ro_after_init if not within .data */
2185         if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2186                 create_object((unsigned long)__start_ro_after_init,
2187                               __end_ro_after_init - __start_ro_after_init,
2188                               KMEMLEAK_GREY, GFP_ATOMIC);
2189 }
2190
2191 /*
2192  * Late initialization function.
2193  */
2194 static int __init kmemleak_late_init(void)
2195 {
2196         kmemleak_late_initialized = 1;
2197
2198         debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2199
2200         if (kmemleak_error) {
2201                 /*
2202                  * Some error occurred and kmemleak was disabled. There is a
2203                  * small chance that kmemleak_disable() was called immediately
2204                  * after setting kmemleak_late_initialized and we may end up with
2205                  * two clean-up runs, but they are serialized by scan_mutex.
2206                  */
2207                 schedule_work(&cleanup_work);
2208                 return -ENOMEM;
2209         }
2210
2211         if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2212                 mutex_lock(&scan_mutex);
2213                 start_scan_thread();
2214                 mutex_unlock(&scan_mutex);
2215         }
2216
2217         pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2218                 mem_pool_free_count);
2219
2220         return 0;
2221 }
2222 late_initcall(kmemleak_late_init);