/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block.  The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */

#define BYTES_PER_POINTER       sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN)
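
/*
 * For illustration: only the GFP bits that the caller's flags share with
 * GFP_KERNEL or GFP_ATOMIC survive the mask. For example, a caller passing
 * GFP_NOFS keeps its reclaim and IO bits but picks up __GFP_NORETRY |
 * __GFP_NOMEMALLOC | __GFP_NOWARN, so kmemleak's own allocations fail fast
 * and quietly under memory pressure.
 */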

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long start;
        size_t size;
};

#define KMEMLEAK_GREY   0
#define KMEMLEAK_BLACK  -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        spinlock_t lock;
        unsigned int flags;             /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct rb_node rb_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned long pointer;
        size_t size;
        /* pass surplus references to this pointer */
        unsigned long excess_ref;
        /* minimum number of pointers found before it is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* checksum for detecting modified objects */
        u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
        unsigned int trace_len;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE            16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE          1
/* include ASCII after the hex output */
#define HEX_ASCII               1
/* max number of lines to be printed */
#define HEX_MAX_LINES           2
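
/*
 * With the values above, hex_dump_object() below prints at most
 * HEX_MAX_LINES * HEX_ROW_SIZE = 2 * 16 = 32 bytes per object, one byte
 * per group, followed by an ASCII column.
 */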

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
        KMEMLEAK_ALLOC,
        KMEMLEAK_ALLOC_PERCPU,
        KMEMLEAK_FREE,
        KMEMLEAK_FREE_PART,
        KMEMLEAK_FREE_PERCPU,
        KMEMLEAK_NOT_LEAK,
        KMEMLEAK_IGNORE,
        KMEMLEAK_SCAN_AREA,
        KMEMLEAK_NO_SCAN,
        KMEMLEAK_SET_EXCESS_REF
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
        int op_type;                    /* kmemleak operation type */
        int min_count;                  /* minimum reference count */
        const void *ptr;                /* allocated/freed memory block */
        union {
                size_t size;            /* memory block size */
                unsigned long excess_ref; /* surplus reference passing */
        };
        unsigned long trace[MAX_TRACE]; /* stack trace */
        unsigned int trace_len;         /* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
        early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
{
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;

        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

        seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
        kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *              sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *              (min_count == -1). No function defined for this color.
 * Newly created objects start white (object->count == 0, see create_object())
 * and are only considered for reporting after the next memory scan.
 */
static bool color_white(const struct kmemleak_object *object)
{
        return object->count != KMEMLEAK_BLACK &&
                object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
        return object->min_count != KMEMLEAK_BLACK &&
                object->count >= object->min_count;
}
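
/*
 * Worked example of the encoding: an object created with min_count == 1 and
 * no references found yet (count == 0) is white and thus a leak candidate;
 * an object with min_count == 0 is gray regardless of count and is never
 * reported; an object painted with min_count == KMEMLEAK_BLACK (-1) is
 * neither white nor gray and is skipped entirely.
 */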

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
}
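
/*
 * Note: jiffies_min_age is derived from MSECS_MIN_AGE during initialization
 * (outside this excerpt), so with the default of 5000 ms an object allocated
 * less than five seconds before the last scan started is not reported even
 * if it is white.
 */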

/*
 * Print the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;
        unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

        seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
        seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
                   object->comm, object->pid, object->jiffies,
                   msecs_age / 1000, msecs_age % 1000);
        hex_dump_object(seq, object);
        seq_printf(seq, "  backtrace:\n");

        for (i = 0; i < object->trace_len; i++) {
                void *ptr = (void *)object->trace[i];
                seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases encountered during kmemleak operations. It must be
 * called with the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        struct stack_trace trace;

        trace.nr_entries = object->trace_len;
        trace.entries = object->trace;

        pr_notice("Object 0x%08lx (size %zu):\n",
                  object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                  object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        print_stack_trace(&trace, 4);
}

/*
 * Look up the metadata (kmemleak_object) of a memory block in the object
 * search tree based on a pointer value. If alias is 0, only values pointing
 * to the beginning of the memory block are allowed. The kmemleak_lock must
 * be held when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        struct rb_node *rb = object_tree_root.rb_node;

        while (rb) {
                struct kmemleak_object *object =
                        rb_entry(rb, struct kmemleak_object, rb_node);
                if (ptr < object->pointer)
                        rb = object->rb_node.rb_left;
                else if (object->pointer + object->size <= ptr)
                        rb = object->rb_node.rb_right;
                else if (object->pointer == ptr || alias)
                        return object;
                else {
                        kmemleak_warn("Found object by alias at 0x%08lx\n",
                                      ptr);
                        dump_object_info(object);
                        break;
                }
        }
        return NULL;
}
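
/*
 * Illustration: for an object covering [0x1000, 0x1040),
 * lookup_object(0x1000, 0) and lookup_object(0x1010, 1) both return the
 * object, while lookup_object(0x1010, 0) emits the "by alias" warning above
 * and returns NULL.
 */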

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        rcu_read_lock();
        read_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        read_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        write_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        if (object) {
                rb_erase(&object->rb_node, &object_tree_root);
                list_del_rcu(&object->object_list);
        }
        write_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
        struct stack_trace stack_trace;

        stack_trace.max_entries = MAX_TRACE;
        stack_trace.nr_entries = 0;
        stack_trace.entries = trace;
        stack_trace.skip = 2;
        save_stack_trace(&stack_trace);

        return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                                             int min_count, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object, *parent;
        struct rb_node **link, *rb_parent;

        object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
                return NULL;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = size;
        object->excess_ref = 0;
        object->min_count = min_count;
        object->count = 0;                      /* white color initially */
        object->jiffies = jiffies;
        object->checksum = 0;

        /* task information */
        if (in_irq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
        } else if (in_serving_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the command line is not correct.
                 */
                strncpy(object->comm, current->comm, sizeof(object->comm));
        }

        /* kernel backtrace */
        object->trace_len = __save_stack_trace(object->trace);

        write_lock_irqsave(&kmemleak_lock, flags);

        min_addr = min(min_addr, ptr);
        max_addr = max(max_addr, ptr + size);
        link = &object_tree_root.rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
                if (ptr + size <= parent->pointer)
                        link = &parent->rb_node.rb_left;
                else if (parent->pointer + parent->size <= ptr)
                        link = &parent->rb_node.rb_right;
                else {
                        kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                                      ptr);
                        /*
                         * No need for parent->lock here since "parent" cannot
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
                        kmem_cache_free(object_cache, object);
                        object = NULL;
                        goto out;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
        rb_insert_color(&object->rb_node, &object_tree_root);

        list_add_tail_rcu(&object->object_list, &object_list);
out:
        write_unlock_irqrestore(&kmemleak_lock, flags);
        return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
        unsigned long flags;

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
        struct kmemleak_object *object;

        object = find_and_remove_object(ptr, 0);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                              ptr);
#endif
                return;
        }
        __delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
        struct kmemleak_object *object;
        unsigned long start, end;

        object = find_and_remove_object(ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
#endif
                return;
        }

        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
         * this happens before kmemleak_init() is called. The path below is
         * only executed during early log recording in kmemleak_init(), so
         * GFP_KERNEL is enough.
         */
        start = object->pointer;
        end = object->pointer + object->size;
        if (ptr > start)
                create_object(start, ptr - start, object->min_count,
                              GFP_KERNEL);
        if (ptr + size < end)
                create_object(ptr + size, end - ptr - size, object->min_count,
                              GFP_KERNEL);

        __delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
        object->min_count = color;
        if (color == KMEMLEAK_BLACK)
                object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
        unsigned long flags;

        spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
        spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
                              (color == KMEMLEAK_GREY) ? "Grey" :
                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                return;
        }
        paint_it(object, color);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area;

        object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
        if (!area) {
                pr_warn("Cannot allocate a scan area\n");
                goto out;
        }

        spin_lock_irqsave(&object->lock, flags);
        if (size == SIZE_MAX) {
                size = object->pointer + object->size - ptr;
        } else if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->start = ptr;
        area->size = size;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        spin_unlock_irqrestore(&object->lock, flags);
out:
        put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->excess_ref = excess_ref;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
                             int min_count)
{
        unsigned long flags;
        struct early_log *log;

        if (kmemleak_error) {
                /* kmemleak stopped recording, just count the requests */
                crt_early_log++;
                return;
        }

        if (crt_early_log >= ARRAY_SIZE(early_log)) {
                crt_early_log++;
                kmemleak_disable();
                return;
        }

        /*
         * There is no need for locking since the kernel is still in UP mode
         * at this stage. Disabling the IRQs is enough.
         */
        local_irq_save(flags);
        log = &early_log[crt_early_log];
        log->op_type = op_type;
        log->ptr = ptr;
        log->size = size;
        log->min_count = min_count;
        log->trace_len = __save_stack_trace(log->trace);
        crt_early_log++;
        local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
        struct kmemleak_object *object;
        unsigned long flags;
        int i;

        if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
                return;

        /*
         * RCU locking needed to ensure object is not freed via put_object().
         */
        rcu_read_lock();
        object = create_object((unsigned long)log->ptr, log->size,
                               log->min_count, GFP_ATOMIC);
        if (!object)
                goto out;
        spin_lock_irqsave(&object->lock, flags);
        for (i = 0; i < log->trace_len; i++)
                object->trace[i] = log->trace[i];
        object->trace_len = log->trace_len;
        spin_unlock_irqrestore(&object->lock, flags);
out:
        rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace for each
 * per-CPU copy.
 */
static void early_alloc_percpu(struct early_log *log)
{
        unsigned int cpu;
        const void __percpu *ptr = log->ptr;

        for_each_possible_cpu(cpu) {
                log->ptr = per_cpu_ptr(ptr, cpu);
                early_alloc(log);
        }
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:        pointer to beginning of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object. If during memory
 *              scanning a number of references less than @min_count is found,
 *              the object is reported as a memory leak. If @min_count is 0,
 *              the object is never reported as a leak. If @min_count is -1,
 *              the object is ignored (not scanned and not reported as a leak)
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
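
/*
 * Usage sketch (my_pool_alloc/my_pool_free are hypothetical, for
 * illustration only): code handing out memory outside the standard
 * allocators can register its blocks manually so they are scanned and
 * leak-checked:
 *
 *	obj = my_pool_alloc(pool, size);
 *	kmemleak_alloc(obj, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(obj);
 *	my_pool_free(pool, obj);
 */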

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 * @size:       size of the object
 * @gfp:        flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
{
        unsigned int cpu;

        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                      size, 0, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:       pointer to vm_struct
 * @size:       size of the object
 * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

        /*
         * A min_count = 2 is needed because vm_struct contains a reference to
         * the virtual address of the vmalloc'ed block.
         */
        if (kmemleak_enabled) {
                create_object((unsigned long)area->addr, size, 2, gfp);
                object_set_excess_ref((unsigned long)area,
                                      (unsigned long)area->addr);
        } else if (kmemleak_early_log) {
                log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
                /* reusing early_log.size for storing area->addr */
                log_early(KMEMLEAK_SET_EXCESS_REF,
                          area, (unsigned long)area->addr, 0);
        }
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:        pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:        pointer to the beginning or inside the object. This also
 *              represents the start of the range to be freed
 * @size:       size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        unsigned int cpu;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:        pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
        struct kmemleak_object *object;
        unsigned long flags;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;

        object = find_and_get_object((unsigned long)ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Updating stack trace for unknown object at %p\n",
                              ptr);
#endif
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->trace_len = __save_stack_trace(object->trace);
        spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:        pointer to beginning or inside the object. This also
 *              represents the start of the scan area
 * @size:       size of the scan area
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
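
/*
 * Usage sketch (the 'desc' structure is hypothetical): if only one field of
 * a large object holds pointers to other allocations, scanning can be
 * narrowed to that field right after allocation:
 *
 *	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
 *	kmemleak_scan_area(&desc->refs, sizeof(desc->refs), GFP_KERNEL);
 */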

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
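
/*
 * Usage sketch (illustrative): a buffer known to carry only data and never
 * kernel pointers, e.g. a raw I/O buffer, can be excluded from scanning so
 * its contents cannot accidentally look like references:
 *
 *	buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	kmemleak_no_scan(buf);
 */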

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *                       address argument
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *                           physical address argument
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *                          address argument
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *                        address argument
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
        u32 old_csum = object->checksum;

        kasan_disable_current();
        object->checksum = crc32(0, (void *)object->pointer, object->size);
        kasan_enable_current();

        return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
        if (!color_white(object)) {
                /* non-orphan, ignored or new */
                return;
        }

        /*
         * Increase the object's reference count (number of pointers to the
         * memory block). If this count reaches the required minimum, the
         * object's color will become gray and it will be added to the
         * gray_list.
         */
        object->count++;
        if (color_gray(object)) {
                /* put_object() called when removing from gray_list */
                WARN_ON(!get_object(object));
                list_add_tail(&object->gray_list, &gray_list);
        }
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
        if (!kmemleak_enabled)
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();

        return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
                       struct kmemleak_object *scanned)
{
        unsigned long *ptr;
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
        unsigned long flags;

        read_lock_irqsave(&kmemleak_lock, flags);
        for (ptr = start; ptr < end; ptr++) {
                struct kmemleak_object *object;
                unsigned long pointer;
                unsigned long excess_ref;

                if (scan_should_stop())
                        break;

                kasan_disable_current();
                pointer = *ptr;
                kasan_enable_current();

                if (pointer < min_addr || pointer >= max_addr)
                        continue;

                /*
                 * No need for get_object() here since we hold kmemleak_lock.
                 * object->use_count cannot be dropped to 0 while the object
                 * is still present in object_tree_root and object_list
                 * (with updates protected by kmemleak_lock).
                 */
                object = lookup_object(pointer, 1);
                if (!object)
                        continue;
                if (object == scanned)
                        /* self referenced, ignore */
                        continue;

                /*
                 * Avoid the lockdep recursive warning on object->lock being
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
                spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                /* only pass surplus references (object already gray) */
                if (color_gray(object)) {
                        excess_ref = object->excess_ref;
                        /* no need for update_refs() if object already gray */
                } else {
                        excess_ref = 0;
                        update_refs(object);
                }
                spin_unlock(&object->lock);

                if (excess_ref) {
                        object = lookup_object(excess_ref, 0);
                        if (!object)
                                continue;
                        if (object == scanned)
                                /* circular reference, ignore */
                                continue;
                        spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                        update_refs(object);
                        spin_unlock(&object->lock);
                }
        }
        read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
        void *next;

        while (start < end) {
                next = min(start + MAX_SCAN_SIZE, end);
                scan_block(start, next, NULL);
                start = next;
                cond_resched();
        }
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A precondition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
        struct kmemleak_scan_area *area;
        unsigned long flags;

        /*
         * Once the object->lock is acquired, the corresponding memory block
         * cannot be freed (the same lock is acquired in delete_object).
         */
        spin_lock_irqsave(&object->lock, flags);
        if (object->flags & OBJECT_NO_SCAN)
                goto out;
        if (!(object->flags & OBJECT_ALLOCATED))
                /* already freed object */
                goto out;
        if (hlist_empty(&object->area_list)) {
                void *start = (void *)object->pointer;
                void *end = (void *)(object->pointer + object->size);
                void *next;

                do {
                        next = min(start + MAX_SCAN_SIZE, end);
                        scan_block(start, next, object);

                        start = next;
                        if (start >= end)
                                break;

                        spin_unlock_irqrestore(&object->lock, flags);
                        cond_resched();
                        spin_lock_irqsave(&object->lock, flags);
                } while (object->flags & OBJECT_ALLOCATED);
        } else
                hlist_for_each_entry(area, &object->area_list, node)
                        scan_block((void *)area->start,
                                   (void *)(area->start + area->size),
                                   object);
out:
        spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
        struct kmemleak_object *object, *tmp;

        /*
         * The list traversal is safe for both tail additions and removals
         * from inside the loop. The kmemleak objects cannot be freed from
         * outside the loop because their use_count was incremented.
         */
        object = list_entry(gray_list.next, typeof(*object), gray_list);
        while (&object->gray_list != &gray_list) {
                cond_resched();

                /* may add new objects to the list */
                if (!scan_should_stop())
                        scan_object(object);

                tmp = list_entry(object->gray_list.next, typeof(*object),
                                 gray_list);

                /* remove the object from the list and release it */
                list_del(&object->gray_list);
                put_object(object);

                object = tmp;
        }
        WARN_ON(!list_empty(&gray_list));
}
1458
1459 /*
1460  * Scan data sections and all the referenced memory blocks allocated via the
1461  * kernel's standard allocators. This function must be called with the
1462  * scan_mutex held.
1463  */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}
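
/*
 * Illustrative walk-through of the coloring above: a freshly whitened
 * object has count == 0. Each pointer to it found by scan_block()
 * increments count; once count >= min_count (e.g. 1 for a typical
 * kmalloc'ed object) the object turns gray and is queued to be scanned
 * itself. Objects still white when the scan completes are reported as
 * suspected leaks.
 */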

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * found at the end of a memory scan are reported, but only the first time
 * they are seen.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}
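
/*
 * Example timeline (illustrative, assuming SECS_FIRST_SCAN == 60 and
 * jiffies_scan_wait derived from SECS_SCAN_WAIT == 600): the thread
 * sleeps for about a minute after starting, then scans roughly every
 * ten minutes, with both sleeps cut short if kthread_stop() is called.
 */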

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. Note that reading
 * the "kmemleak" file does not trigger a memory scan; one must be requested
 * explicitly (e.g. by writing "scan" to the file).
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increments that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};
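
/*
 * Typical usage from user space (debugfs assumed mounted in the usual
 * location):
 *
 *	# mount -t debugfs nodev /sys/kernel/debug
 *	# cat /sys/kernel/debug/kmemleak
 *
 * Each object already reported by a scan is printed through
 * kmemleak_seq_show() above.
 */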

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}
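
/*
 * Illustrative usage (the address below is a placeholder taken from a
 * previous leak report):
 *
 *	# echo dump=0xffff880017c0a000 > /sys/kernel/debug/kmemleak
 *
 * kstrtoul() is called with base 0, so decimal, octal and 0x-prefixed
 * hexadecimal addresses are all accepted.
 */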

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not scan these objects again, they could potentially
 * contain references to newly allocated objects and we would end up with
 * false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}
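
/*
 * Illustrative usage: after inspecting the current report, the baseline
 * can be reset so that only leaks appearing from this point on are shown
 * by subsequent scans:
 *
 *	# echo clear > /sys/kernel/debug/kmemleak
 */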

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off        - disable kmemleak (irreversible)
 *   stack=on   - enable the task stacks scanning
 *   stack=off  - disable the task stacks scanning
 *   scan=on    - start the automatic memory scanning thread
 *   scan=off   - stop the automatic memory scanning thread
 *   scan=...   - set the automatic memory scanning period in seconds (0 to
 *                disable it)
 *   scan       - trigger a memory scan
 *   clear      - mark all currently reported unreferenced kmemleak objects
 *                as grey so they are no longer printed, or free all kmemleak
 *                objects if kmemleak has been disabled
 *   dump=...   - dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
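
/*
 * Illustrative usage of the commands parsed above (one command per
 * write):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger one scan)
 *	# echo scan=300 > /sys/kernel/debug/kmemleak	(scan every 5 min)
 *	# echo stack=off > /sys/kernel/debug/kmemleak	(skip task stacks)
 */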

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks have been found (otherwise, kmemleak may still hold useful
 * information on the reported leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
	 * longer track object freeing. Ordering of the scan thread stopping and
	 * the memory accesses below is guaranteed by the kthread_stop()
	 * function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
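
/*
 * Illustrative usage on the kernel command line: booting with
 * "kmemleak=off" disables the detector early, while "kmemleak=on" sets
 * kmemleak_skip_disable so that CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF does
 * not turn kmemleak off in kmemleak_init().
 */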

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		case KMEMLEAK_SET_EXCESS_REF:
			object_set_excess_ref((unsigned long)log->ptr,
					      log->excess_ref);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}
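
/*
 * Illustrative replay example: a kmalloc() that happened before
 * kmemleak_init() was recorded as a KMEMLEAK_ALLOC entry in early_log[];
 * the loop above turns it into a real kmemleak_object via early_alloc(),
 * as if kmemleak_alloc() had been callable at the time.
 */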

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);