/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation, so
 * at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
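/*
 * Worked example (illustrative): with HZ=250, ODEBUG_FREE_WORK_DELAY is
 * DIV_ROUND_UP(250, 10) = 25 jiffies, i.e. one free_obj_work() round every
 * 100ms. Ten rounds per second times ODEBUG_FREE_WORK_MAX (1024) objects
 * per round gives the ~10k objects/sec ceiling mentioned above.
 */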
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};
/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);
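/*
 * Note on the pool hierarchy (summary of the code below, from fastest to
 * slowest path): a small irq-protected percpu free list (percpu_obj_pool),
 * the global obj_pool protected by pool_lock, and obj_to_free, which holds
 * objects queued for deferred kmem_cache_free() by free_obj_work().
 */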
/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * are adjusted on the fly.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;
/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
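/*
 * Usage note: booting with "debug_objects" on the kernel command line
 * enables the tracker even when CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT is 0;
 * "no_debug_objects" force-disables it, e.g. to rule out ODEBUG overhead
 * when bisecting a performance problem.
 */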
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}
	}

	if (obj_pool_used > obj_pool_max_used)
		obj_pool_max_used = obj_pool_used;

	if (obj_pool_free < obj_pool_min_free)
		obj_pool_min_free = obj_pool_free;

	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}
/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}
static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif
/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
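/*
 * Illustrative example (assumes 4K pages): the addresses 0xffff888012345008
 * and 0xffff888012345ff0 lie in the same chunk 0xffff888012345000, so they
 * hash to the same bucket. This is what lets debug_check_no_obj_freed()
 * scan a freed memory range by walking only the buckets that cover it.
 */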
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}
static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
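/*
 * Usage sketch (illustrative, not part of this file): a subsystem wires up
 * a descriptor once and calls debug_object_init() from its constructor.
 * "my_timer" and "my_timer_descr" are hypothetical names.
 *
 *	static const struct debug_obj_descr my_timer_descr = {
 *		.name = "my_timer",
 *	};
 *
 *	void my_timer_init(struct my_timer *t)
 *	{
 *		debug_object_init(t, &my_timer_descr);
 *	}
 *
 * On-stack instances would use debug_object_init_on_stack() instead,
 * paired with debug_object_free() before leaving the stack frame.
 */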
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (likely(!IS_ERR_OR_NULL(obj))) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return 0;
	}

	/* Object is neither static nor tracked. It's not initialized */
	debug_print_object(&o, "activate");
	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
	return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
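/*
 * Usage sketch (hypothetical caller): the return value can be used to
 * refuse operating on an object that failed the activation checks:
 *
 *	int my_timer_start(struct my_timer *t)
 *	{
 *		if (debug_object_activate(t, &my_timer_descr))
 *			return -EINVAL;	/. object was not initialized ./
 *		...
 *	}
 */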
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
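/*
 * Usage sketch (hypothetical destructor): debug_object_free() is called
 * right before the memory backing the tracked object is released:
 *
 *	void my_timer_destroy(struct my_timer *t)
 *	{
 *		debug_object_free(t, &my_timer_descr);
 *		kfree(t);
 *	}
 */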
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
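/*
 * Usage sketch (hypothetical sub-states): a caller can layer its own state
 * machine on top of ODEBUG_STATE_ACTIVE via obj->astate, e.g.
 *
 *	#define MY_OBJ_READY	0
 *	#define MY_OBJ_QUEUED	1
 *
 *	/. Transition READY -> QUEUED; warns if the object is not ACTIVE
 *	 . or its active state is not READY. ./
 *	debug_object_active_state(obj, &my_descr, MY_OBJ_READY, MY_OBJ_QUEUED);
 */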
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	const struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
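/*
 * These stats surface at /sys/kernel/debug/debug_objects/stats (created in
 * debug_objects_init_debugfs() below). Note the adjustment above: pool_free
 * adds the percpu free objects back in and pool_used subtracts them,
 * compensating for the counting skew described at the obj_pool_free and
 * obj_pool_used declarations.
 */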
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}
/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU
	 * is up and interrupts are disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}
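/*
 * Worked example (illustrative): on a system with 8 possible CPUs,
 * extras = 8 * ODEBUG_BATCH_SIZE (16) = 128, which raises the pool target
 * debug_objects_pool_size from 1024 to 1152 objects and the refill
 * threshold debug_objects_pool_min_level from 256 to 384.
 */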