/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it frees at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);
/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * are therefore only approximate.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;
static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
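
/*
 * Usage note (kernel boot command line, not code): these parameters
 * override the compiled-in default from
 * CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT, e.g.:
 *
 *	debug_objects		- force-enable the object tracker
 *	no_debug_objects	- force-disable the object tracker
 */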
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			goto out;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	obj = NULL;
out:
	return obj;
}
/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}
/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, refill it
	 * from the global free list. As it is likely that a workload
	 * may be gearing up to use more and more objects, don't free any
	 * of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}
static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
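
/*
 * Example (illustrative, assuming 4k pages so ODEBUG_CHUNK_SHIFT == 12):
 * the addresses 0x1000 and 0x17f8 lie in the same chunk, so
 * get_bucket(0x1000) == get_bucket(0x17f8); both hash the chunk number
 * 0x1000 >> ODEBUG_CHUNK_SHIFT == 1. A freed memory range can therefore
 * be scanned by visiting only the buckets of the chunks it spans.
 */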
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}
static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}
static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();
}
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
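
/*
 * Usage sketch (hypothetical "foo" subsystem, not part of this file): a
 * tracked object only needs a descriptor with at least a name; all the
 * callbacks are optional.
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		...
 *	}
 */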
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
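
/*
 * Sketch (hypothetical, reusing foo_debug_descr from above): objects that
 * live on the stack must use the _on_stack variant so the annotation check
 * in debug_object_is_on_stack() matches, and they must be removed from
 * tracking before the function returns:
 *
 *	void foo_on_stack_demo(void)
 *	{
 *		struct foo f;
 *
 *		debug_object_init_on_stack(&f, &foo_debug_descr);
 *		...
 *		debug_object_free(&f, &foo_debug_descr);
 *	}
 */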
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (likely(!IS_ERR_OR_NULL(obj))) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return 0;
	}

	/* Object is neither static nor tracked. It's not initialized */
	debug_print_object(&o, "activate");
	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
	return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
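
/*
 * Sketch (hypothetical caller): activating an object that was never
 * initialized is reported and, unless a fixup repairs it, fails, so
 * callers that care should check the return value:
 *
 *	int foo_start(struct foo *f)
 *	{
 *		if (debug_object_activate(f, &foo_debug_descr))
 *			return -EINVAL;
 *		...
 *		return 0;
 *	}
 */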
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
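
/*
 * Sketch (hypothetical): debug_object_destroy() leaves the tracking entry
 * in place so that later init/activate on the same memory is reported,
 * while debug_object_free() removes it entirely, typically right before
 * the memory itself is released:
 *
 *	void foo_release(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *		kfree(f);
 *	}
 */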
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
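
/*
 * Sketch (hypothetical): subsystems call this from paths that require a
 * prior init, e.g. before queueing an object, to catch users that skipped
 * the init call on a dynamically allocated (non-static) object:
 *
 *	void foo_queue(struct foo *f)
 *	{
 *		debug_object_assert_init(f, &foo_debug_descr);
 *		...
 *	}
 */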
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;
		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
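
/*
 * Sketch (hypothetical astate values): @expect/@next implement a small
 * private state machine within ODEBUG_STATE_ACTIVE. The transition below
 * only succeeds while obj->astate still equals FOO_IDLE; any other value
 * triggers a report:
 *
 *	#define FOO_IDLE	0
 *	#define FOO_QUEUED	1
 *
 *	debug_object_active_state(f, &foo_debug_descr, FOO_IDLE, FOO_QUEUED);
 */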
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
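
/*
 * Sketch (illustrative): with CONFIG_DEBUG_OBJECTS_FREE the core allocators
 * invoke this hook when memory is returned; an allocator-like layer can do
 * the same before recycling a range:
 *
 *	static void my_pool_free(void *ptr, unsigned long size)
 *	{
 *		debug_check_no_obj_freed(ptr, size);
 *		...
 *	}
 */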
#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);
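
/*
 * Usage note (shell, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :...
 *	pool_free     :...
 */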
#else
static inline void debug_objects_init_debugfs(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif /* CONFIG_DEBUG_OBJECTS_SELFTEST */
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU is
	 * up and interrupts have been disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which would lead to recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof(struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	}

	debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}