1 /*
2  * Generic infrastructure for lifetime debugging of objects.
3  *
4  * Started by Thomas Gleixner
5  *
6  * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
7  *
8  * For licensing details see kernel-base/COPYING
9  */
10
11 #define pr_fmt(fmt) "ODEBUG: " fmt
12
13 #include <linux/debugobjects.h>
14 #include <linux/interrupt.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <linux/seq_file.h>
18 #include <linux/debugfs.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/kmemleak.h>
22 #include <linux/cpu.h>
23
24 #define ODEBUG_HASH_BITS        14
25 #define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)
26
27 #define ODEBUG_POOL_SIZE        1024
28 #define ODEBUG_POOL_MIN_LEVEL   256
29 #define ODEBUG_POOL_PERCPU_SIZE 64
30 #define ODEBUG_BATCH_SIZE       16
31
32 #define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
33 #define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
34 #define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))
35
36 /*
37  * We limit the freeing of debug objects via the workqueue to a maximum
38  * frequency of 10Hz and about 1024 objects for each freeing operation,
39  * so at most ~10k debug objects are freed per second.
40  */
41 #define ODEBUG_FREE_WORK_MAX    1024
42 #define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)
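/*
 * Worked example of the throttle above, assuming HZ=250 (the actual value
 * depends on the kernel configuration):
 *
 *   ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies ~= 100ms
 *
 * so the work runs at most ~10 times per second and frees at most
 * 10 * ODEBUG_FREE_WORK_MAX = 10240 objects per second.
 */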
43
44 struct debug_bucket {
45         struct hlist_head       list;
46         raw_spinlock_t          lock;
47 };
48
49 /*
50  * Debug object percpu free list
51  * Access is protected by disabling irq
52  */
53 struct debug_percpu_free {
54         struct hlist_head       free_objs;
55         int                     obj_free;
56 };
57
58 static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
59
60 static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];
61
62 static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
63
64 static DEFINE_RAW_SPINLOCK(pool_lock);
65
66 static HLIST_HEAD(obj_pool);
67 static HLIST_HEAD(obj_to_free);
68
69 /*
70  * Because of the presence of percpu free pools, obj_pool_free will
71  * under-count those in the percpu free pools. Similarly, obj_pool_used
72  * will over-count those in the percpu free pools. Adjustments will be
73  * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
74  * can be off.
75  */
76 static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
77 static int                      obj_pool_free = ODEBUG_POOL_SIZE;
78 static int                      obj_pool_used;
79 static int                      obj_pool_max_used;
80 static bool                     obj_freeing;
81 /* The number of objs on the global free list */
82 static int                      obj_nr_tofree;
83
84 static int                      debug_objects_maxchain __read_mostly;
85 static int __maybe_unused       debug_objects_maxchecked __read_mostly;
86 static int                      debug_objects_fixups __read_mostly;
87 static int                      debug_objects_warnings __read_mostly;
88 static int                      debug_objects_enabled __read_mostly
89                                 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
90 static int                      debug_objects_pool_size __read_mostly
91                                 = ODEBUG_POOL_SIZE;
92 static int                      debug_objects_pool_min_level __read_mostly
93                                 = ODEBUG_POOL_MIN_LEVEL;
94 static const struct debug_obj_descr *descr_test  __read_mostly;
95 static struct kmem_cache        *obj_cache __read_mostly;
96
97 /*
98  * Track numbers of kmem_cache_alloc()/free() calls done.
99  */
100 static int                      debug_objects_allocated;
101 static int                      debug_objects_freed;
102
103 static void free_obj_work(struct work_struct *work);
104 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
105
106 static int __init enable_object_debug(char *str)
107 {
108         debug_objects_enabled = 1;
109         return 0;
110 }
111
112 static int __init disable_object_debug(char *str)
113 {
114         debug_objects_enabled = 0;
115         return 0;
116 }
117
118 early_param("debug_objects", enable_object_debug);
119 early_param("no_debug_objects", disable_object_debug);
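/*
 * The two early parameters above override the compile time default:
 * booting with "debug_objects" on the kernel command line enables tracking
 * even when CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=0, and "no_debug_objects"
 * disables it.
 */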
120
121 static const char *obj_states[ODEBUG_STATE_MAX] = {
122         [ODEBUG_STATE_NONE]             = "none",
123         [ODEBUG_STATE_INIT]             = "initialized",
124         [ODEBUG_STATE_INACTIVE]         = "inactive",
125         [ODEBUG_STATE_ACTIVE]           = "active",
126         [ODEBUG_STATE_DESTROYED]        = "destroyed",
127         [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
128 };
129
130 static void fill_pool(void)
131 {
132         gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
133         struct debug_obj *obj;
134         unsigned long flags;
135
136         if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
137                 return;
138
139         /*
140          * Reuse objs from the global free list; they will be reinitialized
141          * when allocating.
142          *
143          * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
144          * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
145          * sections.
146          */
147         while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
148                 raw_spin_lock_irqsave(&pool_lock, flags);
149                 /*
150                  * Recheck with the lock held as the worker thread might have
151                  * won the race and freed the global free list already.
152                  */
153                 while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
154                         obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
155                         hlist_del(&obj->node);
156                         WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
157                         hlist_add_head(&obj->node, &obj_pool);
158                         WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
159                 }
160                 raw_spin_unlock_irqrestore(&pool_lock, flags);
161         }
162
163         if (unlikely(!obj_cache))
164                 return;
165
166         while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
167                 struct debug_obj *new[ODEBUG_BATCH_SIZE];
168                 int cnt;
169
170                 for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
171                         new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
172                         if (!new[cnt])
173                                 break;
174                 }
175                 if (!cnt)
176                         return;
177
178                 raw_spin_lock_irqsave(&pool_lock, flags);
179                 while (cnt) {
180                         hlist_add_head(&new[--cnt]->node, &obj_pool);
181                         debug_objects_allocated++;
182                         WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
183                 }
184                 raw_spin_unlock_irqrestore(&pool_lock, flags);
185         }
186 }
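/*
 * The lockless fast path above follows a common pattern: obj_pool_free and
 * obj_nr_tofree are read outside the lock with READ_ONCE() and are only
 * modified inside pool_lock sections with WRITE_ONCE(). A minimal sketch of
 * that pattern with a hypothetical counter and lock (not part of this file):
 */
#if 0 /* illustrative sketch only */
static DEFINE_RAW_SPINLOCK(example_lock);
static int example_free;        /* modified under example_lock only */

static void example_refill_one(void)
{
        unsigned long flags;

        /* Lockless pre-check; it may race, so it is rechecked under the lock. */
        if (READ_ONCE(example_free) >= 1)
                return;

        raw_spin_lock_irqsave(&example_lock, flags);
        if (example_free < 1)
                WRITE_ONCE(example_free, example_free + 1); /* pairs with READ_ONCE() */
        raw_spin_unlock_irqrestore(&example_lock, flags);
}
#endif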
187
188 /*
189  * Look up an object in the hash bucket.
190  */
191 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
192 {
193         struct debug_obj *obj;
194         int cnt = 0;
195
196         hlist_for_each_entry(obj, &b->list, node) {
197                 cnt++;
198                 if (obj->object == addr)
199                         return obj;
200         }
201         if (cnt > debug_objects_maxchain)
202                 debug_objects_maxchain = cnt;
203
204         return NULL;
205 }
206
207 /*
208  * Allocate a new object from the hlist
209  */
210 static struct debug_obj *__alloc_object(struct hlist_head *list)
211 {
212         struct debug_obj *obj = NULL;
213
214         if (list->first) {
215                 obj = hlist_entry(list->first, typeof(*obj), node);
216                 hlist_del(&obj->node);
217         }
218
219         return obj;
220 }
221
222 static struct debug_obj *
223 alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
224 {
225         struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
226         struct debug_obj *obj;
227
228         if (likely(obj_cache)) {
229                 obj = __alloc_object(&percpu_pool->free_objs);
230                 if (obj) {
231                         percpu_pool->obj_free--;
232                         goto init_obj;
233                 }
234         }
235
236         raw_spin_lock(&pool_lock);
237         obj = __alloc_object(&obj_pool);
238         if (obj) {
239                 obj_pool_used++;
240                 WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
241
242                 /*
243                  * Looking ahead, allocate one batch of debug objects and
244                  * put them into the percpu free pool.
245                  */
246                 if (likely(obj_cache)) {
247                         int i;
248
249                         for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
250                                 struct debug_obj *obj2;
251
252                                 obj2 = __alloc_object(&obj_pool);
253                                 if (!obj2)
254                                         break;
255                                 hlist_add_head(&obj2->node,
256                                                &percpu_pool->free_objs);
257                                 percpu_pool->obj_free++;
258                                 obj_pool_used++;
259                                 WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
260                         }
261                 }
262
263                 if (obj_pool_used > obj_pool_max_used)
264                         obj_pool_max_used = obj_pool_used;
265
266                 if (obj_pool_free < obj_pool_min_free)
267                         obj_pool_min_free = obj_pool_free;
268         }
269         raw_spin_unlock(&pool_lock);
270
271 init_obj:
272         if (obj) {
273                 obj->object = addr;
274                 obj->descr  = descr;
275                 obj->state  = ODEBUG_STATE_NONE;
276                 obj->astate = 0;
277                 hlist_add_head(&obj->node, &b->list);
278         }
279         return obj;
280 }
281
282 /*
283  * workqueue function to free objects.
284  *
285  * To reduce contention on the global pool_lock, the actual freeing of
286  * debug objects will be delayed if the pool_lock is busy.
287  */
288 static void free_obj_work(struct work_struct *work)
289 {
290         struct hlist_node *tmp;
291         struct debug_obj *obj;
292         unsigned long flags;
293         HLIST_HEAD(tofree);
294
295         WRITE_ONCE(obj_freeing, false);
296         if (!raw_spin_trylock_irqsave(&pool_lock, flags))
297                 return;
298
299         if (obj_pool_free >= debug_objects_pool_size)
300                 goto free_objs;
301
302         /*
303          * The objs on the pool list might be allocated before the work is
304          * run, so recheck whether the pool list is full; if not, refill it
305          * from the global free list. As it is likely that a workload may be
306          * gearing up to use more and more objects, don't free any of them
307          * until the next round.
308          */
309         while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
310                 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
311                 hlist_del(&obj->node);
312                 hlist_add_head(&obj->node, &obj_pool);
313                 WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
314                 WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
315         }
316         raw_spin_unlock_irqrestore(&pool_lock, flags);
317         return;
318
319 free_objs:
320         /*
321          * Pool list is already full and there are still objs on the free
322          * list. Move remaining free objs to a temporary list to free the
323          * memory outside the pool_lock held region.
324          */
325         if (obj_nr_tofree) {
326                 hlist_move_list(&obj_to_free, &tofree);
327                 debug_objects_freed += obj_nr_tofree;
328                 WRITE_ONCE(obj_nr_tofree, 0);
329         }
330         raw_spin_unlock_irqrestore(&pool_lock, flags);
331
332         hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
333                 hlist_del(&obj->node);
334                 kmem_cache_free(obj_cache, obj);
335         }
336 }
337
338 static void __free_object(struct debug_obj *obj)
339 {
340         struct debug_obj *objs[ODEBUG_BATCH_SIZE];
341         struct debug_percpu_free *percpu_pool;
342         int lookahead_count = 0;
343         unsigned long flags;
344         bool work;
345
346         local_irq_save(flags);
347         if (!obj_cache)
348                 goto free_to_obj_pool;
349
350         /*
351          * Try to free it into the percpu pool first.
352          */
353         percpu_pool = this_cpu_ptr(&percpu_obj_pool);
354         if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
355                 hlist_add_head(&obj->node, &percpu_pool->free_objs);
356                 percpu_pool->obj_free++;
357                 local_irq_restore(flags);
358                 return;
359         }
360
361         /*
362          * As the percpu pool is full, look ahead and pull out a batch
363          * of objects from the percpu pool and free them as well.
364          */
365         for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
366                 objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
367                 if (!objs[lookahead_count])
368                         break;
369                 percpu_pool->obj_free--;
370         }
371
372 free_to_obj_pool:
373         raw_spin_lock(&pool_lock);
374         work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
375                (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
376         obj_pool_used--;
377
378         if (work) {
379                 WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
380                 hlist_add_head(&obj->node, &obj_to_free);
381                 if (lookahead_count) {
382                         WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
383                         obj_pool_used -= lookahead_count;
384                         while (lookahead_count) {
385                                 hlist_add_head(&objs[--lookahead_count]->node,
386                                                &obj_to_free);
387                         }
388                 }
389
390                 if ((obj_pool_free > debug_objects_pool_size) &&
391                     (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
392                         int i;
393
394                         /*
395                          * Free one more batch of objects from obj_pool.
396                          */
397                         for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
398                                 obj = __alloc_object(&obj_pool);
399                                 hlist_add_head(&obj->node, &obj_to_free);
400                                 WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
401                                 WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
402                         }
403                 }
404         } else {
405                 WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
406                 hlist_add_head(&obj->node, &obj_pool);
407                 if (lookahead_count) {
408                         WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
409                         obj_pool_used -= lookahead_count;
410                         while (lookahead_count) {
411                                 hlist_add_head(&objs[--lookahead_count]->node,
412                                                &obj_pool);
413                         }
414                 }
415         }
416         raw_spin_unlock(&pool_lock);
417         local_irq_restore(flags);
418 }
419
420 /*
421  * Put the object back into the pool and schedule work to free objects
422  * if necessary.
423  */
424 static void free_object(struct debug_obj *obj)
425 {
426         __free_object(obj);
427         if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
428                 WRITE_ONCE(obj_freeing, true);
429                 schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
430         }
431 }
432
433 #ifdef CONFIG_HOTPLUG_CPU
434 static int object_cpu_offline(unsigned int cpu)
435 {
436         struct debug_percpu_free *percpu_pool;
437         struct hlist_node *tmp;
438         struct debug_obj *obj;
439         unsigned long flags;
440
441         /* Remote access is safe as the CPU is dead already */
442         percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
443         hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
444                 hlist_del(&obj->node);
445                 kmem_cache_free(obj_cache, obj);
446         }
447
448         raw_spin_lock_irqsave(&pool_lock, flags);
449         obj_pool_used -= percpu_pool->obj_free;
450         debug_objects_freed += percpu_pool->obj_free;
451         raw_spin_unlock_irqrestore(&pool_lock, flags);
452
453         percpu_pool->obj_free = 0;
454
455         return 0;
456 }
457 #endif
458
459 /*
460  * We are out of memory. That probably means we have tons of objects
461  * allocated.
462  */
463 static void debug_objects_oom(void)
464 {
465         struct debug_bucket *db = obj_hash;
466         struct hlist_node *tmp;
467         HLIST_HEAD(freelist);
468         struct debug_obj *obj;
469         unsigned long flags;
470         int i;
471
472         pr_warn("Out of memory. ODEBUG disabled\n");
473
474         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
475                 raw_spin_lock_irqsave(&db->lock, flags);
476                 hlist_move_list(&db->list, &freelist);
477                 raw_spin_unlock_irqrestore(&db->lock, flags);
478
479                 /* Now free them */
480                 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
481                         hlist_del(&obj->node);
482                         free_object(obj);
483                 }
484         }
485 }
486
487 /*
488  * We use the pfn of the address for the hash. That way we can check
489  * for freed objects simply by checking the affected bucket.
490  */
491 static struct debug_bucket *get_bucket(unsigned long addr)
492 {
493         unsigned long hash;
494
495         hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
496         return &obj_hash[hash];
497 }
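/*
 * For illustration: ODEBUG_CHUNK_SHIFT equals PAGE_SHIFT, so all addresses
 * within one page map to the same bucket. Assuming 4K pages (PAGE_SHIFT ==
 * 12; architecture dependent), the two hypothetical addresses below differ
 * only in their low 12 bits and therefore hash to the same bucket:
 */
#if 0 /* illustrative sketch only */
static void example_bucket_check(void)
{
        struct debug_bucket *a = get_bucket(0xffff888012345010UL);
        struct debug_bucket *b = get_bucket(0xffff888012345ff0UL);

        /* Both addresses lie in the same ODEBUG_CHUNK_SIZE chunk. */
        WARN_ON(a != b);
}
#endif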
498
499 static void debug_print_object(struct debug_obj *obj, char *msg)
500 {
501         const struct debug_obj_descr *descr = obj->descr;
502         static int limit;
503
504         if (limit < 5 && descr != descr_test) {
505                 void *hint = descr->debug_hint ?
506                         descr->debug_hint(obj->object) : NULL;
507                 limit++;
508                 WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
509                                  "object type: %s hint: %pS\n",
510                         msg, obj_states[obj->state], obj->astate,
511                         descr->name, hint);
512         }
513         debug_objects_warnings++;
514 }
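/*
 * The hint printed above comes from the descriptor's optional debug_hint()
 * callback, which typically returns a callback pointer stored inside the
 * tracked object so that the warning identifies the offending code via %pS.
 * A sketch for a hypothetical object type (struct example_obj is not part
 * of this file):
 */
#if 0 /* illustrative sketch only */
struct example_obj {
        void (*func)(struct example_obj *obj);
};

static void *example_obj_debug_hint(void *addr)
{
        return ((struct example_obj *)addr)->func;
}
#endif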
515
516 /*
517  * Try to repair the damage, so we have a better chance to get useful
518  * debug output.
519  */
520 static bool
521 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
522                    void * addr, enum debug_obj_state state)
523 {
524         if (fixup && fixup(addr, state)) {
525                 debug_objects_fixups++;
526                 return true;
527         }
528         return false;
529 }
530
531 static void debug_object_is_on_stack(void *addr, int onstack)
532 {
533         int is_on_stack;
534         static int limit;
535
536         if (limit > 4)
537                 return;
538
539         is_on_stack = object_is_on_stack(addr);
540         if (is_on_stack == onstack)
541                 return;
542
543         limit++;
544         if (is_on_stack)
545                 pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
546                          task_stack_page(current));
547         else
548                 pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
549                          task_stack_page(current));
550
551         WARN_ON(1);
552 }
553
554 static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
555                                                 const struct debug_obj_descr *descr,
556                                                 bool onstack, bool alloc_ifstatic)
557 {
558         struct debug_obj *obj = lookup_object(addr, b);
559         enum debug_obj_state state = ODEBUG_STATE_NONE;
560
561         if (likely(obj))
562                 return obj;
563
564         /*
565          * debug_object_init() unconditionally allocates untracked
566          * objects. It does not matter whether it is a static object or
567          * not.
568          *
569          * debug_object_assert_init() and debug_object_activate() allow
570          * allocation only if the descriptor callback confirms that the
571          * object is static and considered initialized. For non-static
572          * objects the allocation needs to be done from the fixup callback.
573          */
574         if (unlikely(alloc_ifstatic)) {
575                 if (!descr->is_static_object || !descr->is_static_object(addr))
576                         return ERR_PTR(-ENOENT);
577                 /* Statically allocated objects are considered initialized */
578                 state = ODEBUG_STATE_INIT;
579         }
580
581         obj = alloc_object(addr, b, descr);
582         if (likely(obj)) {
583                 obj->state = state;
584                 debug_object_is_on_stack(addr, onstack);
585                 return obj;
586         }
587
588         /* Out of memory. Do the cleanup outside of the locked region */
589         debug_objects_enabled = 0;
590         return NULL;
591 }
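/*
 * The is_static_object() callback used above decides whether an untracked
 * object may be treated as statically initialized. A sketch, assuming the
 * hypothetical example_obj carries a flags word whose EXAMPLE_OBJ_STATIC_INIT
 * bit is set by its static initializer (both are assumptions):
 */
#if 0 /* illustrative sketch only */
static bool example_obj_is_static(void *addr)
{
        struct example_obj *obj = addr;

        return obj->flags & EXAMPLE_OBJ_STATIC_INIT;
}
#endif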
592
593 static void
594 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
595 {
596         enum debug_obj_state state;
597         struct debug_bucket *db;
598         struct debug_obj *obj;
599         unsigned long flags;
600
601         /*
602          * On RT enabled kernels the pool refill must happen in preemptible
603          * context:
604          */
605         if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
606                 fill_pool();
607
608         db = get_bucket((unsigned long) addr);
609
610         raw_spin_lock_irqsave(&db->lock, flags);
611
612         obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
613         if (unlikely(!obj)) {
614                 raw_spin_unlock_irqrestore(&db->lock, flags);
615                 debug_objects_oom();
616                 return;
617         }
618
619         switch (obj->state) {
620         case ODEBUG_STATE_NONE:
621         case ODEBUG_STATE_INIT:
622         case ODEBUG_STATE_INACTIVE:
623                 obj->state = ODEBUG_STATE_INIT;
624                 break;
625
626         case ODEBUG_STATE_ACTIVE:
627                 state = obj->state;
628                 raw_spin_unlock_irqrestore(&db->lock, flags);
629                 debug_print_object(obj, "init");
630                 debug_object_fixup(descr->fixup_init, addr, state);
631                 return;
632
633         case ODEBUG_STATE_DESTROYED:
634                 raw_spin_unlock_irqrestore(&db->lock, flags);
635                 debug_print_object(obj, "init");
636                 return;
637         default:
638                 break;
639         }
640
641         raw_spin_unlock_irqrestore(&db->lock, flags);
642 }
643
644 /**
645  * debug_object_init - debug checks when an object is initialized
646  * @addr:       address of the object
647  * @descr:      pointer to an object specific debug description structure
648  */
649 void debug_object_init(void *addr, const struct debug_obj_descr *descr)
650 {
651         if (!debug_objects_enabled)
652                 return;
653
654         __debug_object_init(addr, descr, 0);
655 }
656 EXPORT_SYMBOL_GPL(debug_object_init);
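/*
 * Typical usage: a subsystem describes its object type once with a
 * struct debug_obj_descr and calls debug_object_init() when an instance is
 * set up. Only .name is required; the callbacks are optional. The sketch
 * below is hypothetical (struct example_obj, example_obj_debug_descr and
 * example_obj_setup() do not exist in any subsystem):
 */
#if 0 /* illustrative usage sketch only */
static const struct debug_obj_descr example_obj_debug_descr = {
        .name           = "example_obj",
        /* optional: .is_static_object, .debug_hint, .fixup_* callbacks */
};

static void example_obj_setup(struct example_obj *obj)
{
        debug_object_init(obj, &example_obj_debug_descr);
        /* subsystem specific initialization of *obj goes here */
}
#endif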
657
658 /**
659  * debug_object_init_on_stack - debug checks when an object on stack is
660  *                              initialized
661  * @addr:       address of the object
662  * @descr:      pointer to an object specific debug description structure
663  */
664 void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
665 {
666         if (!debug_objects_enabled)
667                 return;
668
669         __debug_object_init(addr, descr, 1);
670 }
671 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
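/*
 * Objects living on the stack must use the _on_stack variant and must be
 * removed from tracking before their stack frame goes away. A sketch,
 * reusing the hypothetical example_obj/example_obj_debug_descr from the
 * sketch after debug_object_init() above:
 */
#if 0 /* illustrative usage sketch only */
static void example_obj_on_stack_user(void)
{
        struct example_obj obj;

        debug_object_init_on_stack(&obj, &example_obj_debug_descr);
        /* ... use the object ... */
        debug_object_free(&obj, &example_obj_debug_descr);
}
#endif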
672
673 /**
674  * debug_object_activate - debug checks when an object is activated
675  * @addr:       address of the object
676  * @descr:      pointer to an object specific debug description structure
677  * Returns 0 for success, -EINVAL if the check failed.
678  */
679 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
680 {
681         struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
682         enum debug_obj_state state;
683         struct debug_bucket *db;
684         struct debug_obj *obj;
685         unsigned long flags;
686         int ret;
687
688         if (!debug_objects_enabled)
689                 return 0;
690
691         db = get_bucket((unsigned long) addr);
692
693         raw_spin_lock_irqsave(&db->lock, flags);
694
695         obj = lookup_object_or_alloc(addr, db, descr, false, true);
696         if (likely(!IS_ERR_OR_NULL(obj))) {
697                 bool print_object = false;
698
699                 switch (obj->state) {
700                 case ODEBUG_STATE_INIT:
701                 case ODEBUG_STATE_INACTIVE:
702                         obj->state = ODEBUG_STATE_ACTIVE;
703                         ret = 0;
704                         break;
705
706                 case ODEBUG_STATE_ACTIVE:
707                         state = obj->state;
708                         raw_spin_unlock_irqrestore(&db->lock, flags);
709                         debug_print_object(obj, "activate");
710                         ret = debug_object_fixup(descr->fixup_activate, addr, state);
711                         return ret ? 0 : -EINVAL;
712
713                 case ODEBUG_STATE_DESTROYED:
714                         print_object = true;
715                         ret = -EINVAL;
716                         break;
717                 default:
718                         ret = 0;
719                         break;
720                 }
721                 raw_spin_unlock_irqrestore(&db->lock, flags);
722                 if (print_object)
723                         debug_print_object(obj, "activate");
724                 return ret;
725         }
726
727         raw_spin_unlock_irqrestore(&db->lock, flags);
728
729         /* If NULL the allocation has hit OOM */
730         if (!obj) {
731                 debug_objects_oom();
732                 return 0;
733         }
734
735         /* Object is neither static nor tracked. It's not initialized */
736         debug_print_object(&o, "activate");
737         ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
738         return ret ? 0 : -EINVAL;
739 }
740 EXPORT_SYMBOL_GPL(debug_object_activate);
741
742 /**
743  * debug_object_deactivate - debug checks when an object is deactivated
744  * @addr:       address of the object
745  * @descr:      pointer to an object specific debug description structure
746  */
747 void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
748 {
749         struct debug_bucket *db;
750         struct debug_obj *obj;
751         unsigned long flags;
752         bool print_object = false;
753
754         if (!debug_objects_enabled)
755                 return;
756
757         db = get_bucket((unsigned long) addr);
758
759         raw_spin_lock_irqsave(&db->lock, flags);
760
761         obj = lookup_object(addr, db);
762         if (obj) {
763                 switch (obj->state) {
764                 case ODEBUG_STATE_INIT:
765                 case ODEBUG_STATE_INACTIVE:
766                 case ODEBUG_STATE_ACTIVE:
767                         if (!obj->astate)
768                                 obj->state = ODEBUG_STATE_INACTIVE;
769                         else
770                                 print_object = true;
771                         break;
772
773                 case ODEBUG_STATE_DESTROYED:
774                         print_object = true;
775                         break;
776                 default:
777                         break;
778                 }
779         }
780
781         raw_spin_unlock_irqrestore(&db->lock, flags);
782         if (!obj) {
783                 struct debug_obj o = { .object = addr,
784                                        .state = ODEBUG_STATE_NOTAVAILABLE,
785                                        .descr = descr };
786
787                 debug_print_object(&o, "deactivate");
788         } else if (print_object) {
789                 debug_print_object(obj, "deactivate");
790         }
791 }
792 EXPORT_SYMBOL_GPL(debug_object_deactivate);
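/*
 * Activation and deactivation bracket the phase in which the object is
 * under the subsystem's control (queued, armed, pending, ...). A sketch,
 * again using the hypothetical example_obj:
 */
#if 0 /* illustrative usage sketch only */
static int example_obj_queue(struct example_obj *obj)
{
        /* Returns -EINVAL (after a warning) if obj is in an unexpected state. */
        if (debug_object_activate(obj, &example_obj_debug_descr))
                return -EINVAL;
        /* ... add obj to the subsystem's queue ... */
        return 0;
}

static void example_obj_dequeue(struct example_obj *obj)
{
        /* ... remove obj from the queue ... */
        debug_object_deactivate(obj, &example_obj_debug_descr);
}
#endif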
793
794 /**
795  * debug_object_destroy - debug checks when an object is destroyed
796  * @addr:       address of the object
797  * @descr:      pointer to an object specific debug description structure
798  */
799 void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
800 {
801         enum debug_obj_state state;
802         struct debug_bucket *db;
803         struct debug_obj *obj;
804         unsigned long flags;
805         bool print_object = false;
806
807         if (!debug_objects_enabled)
808                 return;
809
810         db = get_bucket((unsigned long) addr);
811
812         raw_spin_lock_irqsave(&db->lock, flags);
813
814         obj = lookup_object(addr, db);
815         if (!obj)
816                 goto out_unlock;
817
818         switch (obj->state) {
819         case ODEBUG_STATE_NONE:
820         case ODEBUG_STATE_INIT:
821         case ODEBUG_STATE_INACTIVE:
822                 obj->state = ODEBUG_STATE_DESTROYED;
823                 break;
824         case ODEBUG_STATE_ACTIVE:
825                 state = obj->state;
826                 raw_spin_unlock_irqrestore(&db->lock, flags);
827                 debug_print_object(obj, "destroy");
828                 debug_object_fixup(descr->fixup_destroy, addr, state);
829                 return;
830
831         case ODEBUG_STATE_DESTROYED:
832                 print_object = true;
833                 break;
834         default:
835                 break;
836         }
837 out_unlock:
838         raw_spin_unlock_irqrestore(&db->lock, flags);
839         if (print_object)
840                 debug_print_object(obj, "destroy");
841 }
842 EXPORT_SYMBOL_GPL(debug_object_destroy);
843
844 /**
845  * debug_object_free - debug checks when an object is freed
846  * @addr:       address of the object
847  * @descr:      pointer to an object specific debug description structure
848  */
849 void debug_object_free(void *addr, const struct debug_obj_descr *descr)
850 {
851         enum debug_obj_state state;
852         struct debug_bucket *db;
853         struct debug_obj *obj;
854         unsigned long flags;
855
856         if (!debug_objects_enabled)
857                 return;
858
859         db = get_bucket((unsigned long) addr);
860
861         raw_spin_lock_irqsave(&db->lock, flags);
862
863         obj = lookup_object(addr, db);
864         if (!obj)
865                 goto out_unlock;
866
867         switch (obj->state) {
868         case ODEBUG_STATE_ACTIVE:
869                 state = obj->state;
870                 raw_spin_unlock_irqrestore(&db->lock, flags);
871                 debug_print_object(obj, "free");
872                 debug_object_fixup(descr->fixup_free, addr, state);
873                 return;
874         default:
875                 hlist_del(&obj->node);
876                 raw_spin_unlock_irqrestore(&db->lock, flags);
877                 free_object(obj);
878                 return;
879         }
880 out_unlock:
881         raw_spin_unlock_irqrestore(&db->lock, flags);
882 }
883 EXPORT_SYMBOL_GPL(debug_object_free);
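/*
 * Teardown pairs the two calls above: debug_object_destroy() marks the
 * object so that later init/activate attempts are reported, and
 * debug_object_free() drops it from tracking right before the memory is
 * released. A sketch for the hypothetical example_obj:
 */
#if 0 /* illustrative usage sketch only */
static void example_obj_release(struct example_obj *obj)
{
        debug_object_destroy(obj, &example_obj_debug_descr);
        debug_object_free(obj, &example_obj_debug_descr);
        kfree(obj);
}
#endif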
884
885 /**
886  * debug_object_assert_init - debug checks when an object should be initialized
887  * @addr:       address of the object
888  * @descr:      pointer to an object specific debug description structure
889  */
890 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
891 {
892         struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
893         struct debug_bucket *db;
894         struct debug_obj *obj;
895         unsigned long flags;
896
897         if (!debug_objects_enabled)
898                 return;
899
900         db = get_bucket((unsigned long) addr);
901
902         raw_spin_lock_irqsave(&db->lock, flags);
903         obj = lookup_object_or_alloc(addr, db, descr, false, true);
904         raw_spin_unlock_irqrestore(&db->lock, flags);
905         if (likely(!IS_ERR_OR_NULL(obj)))
906                 return;
907
908         /* If NULL the allocation has hit OOM */
909         if (!obj) {
910                 debug_objects_oom();
911                 return;
912         }
913
914         /* Object is neither tracked nor static. It's not initialized. */
915         debug_print_object(&o, "assert_init");
916         debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
917 }
918 EXPORT_SYMBOL_GPL(debug_object_assert_init);
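/*
 * debug_object_assert_init() is typically used in cancel/del style helpers
 * which must cope with objects that were never initialized. A sketch for
 * the hypothetical example_obj:
 */
#if 0 /* illustrative usage sketch only */
static void example_obj_cancel(struct example_obj *obj)
{
        /* Warns if obj is neither tracked nor a known static object. */
        debug_object_assert_init(obj, &example_obj_debug_descr);
        /* ... actual cancellation ... */
}
#endif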
919
920 /**
921  * debug_object_active_state - debug checks object usage state machine
922  * @addr:       address of the object
923  * @descr:      pointer to an object specific debug description structure
924  * @expect:     expected state
925  * @next:       state to move to if expected state is found
926  */
927 void
928 debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
929                           unsigned int expect, unsigned int next)
930 {
931         struct debug_bucket *db;
932         struct debug_obj *obj;
933         unsigned long flags;
934         bool print_object = false;
935
936         if (!debug_objects_enabled)
937                 return;
938
939         db = get_bucket((unsigned long) addr);
940
941         raw_spin_lock_irqsave(&db->lock, flags);
942
943         obj = lookup_object(addr, db);
944         if (obj) {
945                 switch (obj->state) {
946                 case ODEBUG_STATE_ACTIVE:
947                         if (obj->astate == expect)
948                                 obj->astate = next;
949                         else
950                                 print_object = true;
951                         break;
952
953                 default:
954                         print_object = true;
955                         break;
956                 }
957         }
958
959         raw_spin_unlock_irqrestore(&db->lock, flags);
960         if (!obj) {
961                 struct debug_obj o = { .object = addr,
962                                        .state = ODEBUG_STATE_NOTAVAILABLE,
963                                        .descr = descr };
964
965                 debug_print_object(&o, "active_state");
966         } else if (print_object) {
967                 debug_print_object(obj, "active_state");
968         }
969 }
970 EXPORT_SYMBOL_GPL(debug_object_active_state);
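/*
 * The astate member lets a subsystem track sub-states of an active object;
 * a freshly activated object starts with astate == 0. A sketch with
 * hypothetical sub-states for example_obj:
 */
#if 0 /* illustrative usage sketch only */
enum { EXAMPLE_IDLE = 0, EXAMPLE_QUEUED = 1 };

static void example_obj_mark_queued(struct example_obj *obj)
{
        /* Warns unless the active object is currently in the idle sub-state. */
        debug_object_active_state(obj, &example_obj_debug_descr,
                                  EXAMPLE_IDLE, EXAMPLE_QUEUED);
}
#endif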
971
972 #ifdef CONFIG_DEBUG_OBJECTS_FREE
973 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
974 {
975         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
976         const struct debug_obj_descr *descr;
977         enum debug_obj_state state;
978         struct debug_bucket *db;
979         struct hlist_node *tmp;
980         struct debug_obj *obj;
981         int cnt, objs_checked = 0;
982
983         saddr = (unsigned long) address;
984         eaddr = saddr + size;
985         paddr = saddr & ODEBUG_CHUNK_MASK;
986         chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
987         chunks >>= ODEBUG_CHUNK_SHIFT;
988
989         for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
990                 db = get_bucket(paddr);
991
992 repeat:
993                 cnt = 0;
994                 raw_spin_lock_irqsave(&db->lock, flags);
995                 hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
996                         cnt++;
997                         oaddr = (unsigned long) obj->object;
998                         if (oaddr < saddr || oaddr >= eaddr)
999                                 continue;
1000
1001                         switch (obj->state) {
1002                         case ODEBUG_STATE_ACTIVE:
1003                                 descr = obj->descr;
1004                                 state = obj->state;
1005                                 raw_spin_unlock_irqrestore(&db->lock, flags);
1006                                 debug_print_object(obj, "free");
1007                                 debug_object_fixup(descr->fixup_free,
1008                                                    (void *) oaddr, state);
1009                                 goto repeat;
1010                         default:
1011                                 hlist_del(&obj->node);
1012                                 __free_object(obj);
1013                                 break;
1014                         }
1015                 }
1016                 raw_spin_unlock_irqrestore(&db->lock, flags);
1017
1018                 if (cnt > debug_objects_maxchain)
1019                         debug_objects_maxchain = cnt;
1020
1021                 objs_checked += cnt;
1022         }
1023
1024         if (objs_checked > debug_objects_maxchecked)
1025                 debug_objects_maxchecked = objs_checked;
1026
1027         /* Schedule work to actually kmem_cache_free() objects */
1028         if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1029                 WRITE_ONCE(obj_freeing, true);
1030                 schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1031         }
1032 }
1033
1034 void debug_check_no_obj_freed(const void *address, unsigned long size)
1035 {
1036         if (debug_objects_enabled)
1037                 __debug_check_no_obj_freed(address, size);
1038 }
1039 #endif
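/*
 * debug_check_no_obj_freed() is meant to be called from allocator free
 * paths on the memory range being released, so that still-tracked (and in
 * particular still-active) objects inside the range are reported before the
 * memory is reused. A sketch with a hypothetical free hook:
 */
#if 0 /* illustrative usage sketch only */
static void example_free_range(void *addr, unsigned long size)
{
        debug_check_no_obj_freed(addr, size);
        /* ... hand the range back to the allocator ... */
}
#endif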
1040
1041 #ifdef CONFIG_DEBUG_FS
1042
1043 static int debug_stats_show(struct seq_file *m, void *v)
1044 {
1045         int cpu, obj_percpu_free = 0;
1046
1047         for_each_possible_cpu(cpu)
1048                 obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1049
1050         seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1051         seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1052         seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1053         seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1054         seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1055         seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1056         seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1057         seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1058         seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1059         seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
1060         seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1061         seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1062         return 0;
1063 }
1064 DEFINE_SHOW_ATTRIBUTE(debug_stats);
1065
1066 static int __init debug_objects_init_debugfs(void)
1067 {
1068         struct dentry *dbgdir;
1069
1070         if (!debug_objects_enabled)
1071                 return 0;
1072
1073         dbgdir = debugfs_create_dir("debug_objects", NULL);
1074
1075         debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1076
1077         return 0;
1078 }
1079 __initcall(debug_objects_init_debugfs);
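/*
 * With debugfs mounted at its usual location, the counters above are
 * readable from /sys/kernel/debug/debug_objects/stats.
 */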
1080
1081 #else
1082 static inline void debug_objects_init_debugfs(void) { }
1083 #endif
1084
1085 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1086
1087 /* Random data structure for the self test */
1088 struct self_test {
1089         unsigned long   dummy1[6];
1090         int             static_init;
1091         unsigned long   dummy2[3];
1092 };
1093
1094 static __initconst const struct debug_obj_descr descr_type_test;
1095
1096 static bool __init is_static_object(void *addr)
1097 {
1098         struct self_test *obj = addr;
1099
1100         return obj->static_init;
1101 }
1102
1103 /*
1104  * fixup_init is called when:
1105  * - an active object is initialized
1106  */
1107 static bool __init fixup_init(void *addr, enum debug_obj_state state)
1108 {
1109         struct self_test *obj = addr;
1110
1111         switch (state) {
1112         case ODEBUG_STATE_ACTIVE:
1113                 debug_object_deactivate(obj, &descr_type_test);
1114                 debug_object_init(obj, &descr_type_test);
1115                 return true;
1116         default:
1117                 return false;
1118         }
1119 }
1120
1121 /*
1122  * fixup_activate is called when:
1123  * - an active object is activated
1124  * - an unknown non-static object is activated
1125  */
1126 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1127 {
1128         struct self_test *obj = addr;
1129
1130         switch (state) {
1131         case ODEBUG_STATE_NOTAVAILABLE:
1132                 return true;
1133         case ODEBUG_STATE_ACTIVE:
1134                 debug_object_deactivate(obj, &descr_type_test);
1135                 debug_object_activate(obj, &descr_type_test);
1136                 return true;
1137
1138         default:
1139                 return false;
1140         }
1141 }
1142
1143 /*
1144  * fixup_destroy is called when:
1145  * - an active object is destroyed
1146  */
1147 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1148 {
1149         struct self_test *obj = addr;
1150
1151         switch (state) {
1152         case ODEBUG_STATE_ACTIVE:
1153                 debug_object_deactivate(obj, &descr_type_test);
1154                 debug_object_destroy(obj, &descr_type_test);
1155                 return true;
1156         default:
1157                 return false;
1158         }
1159 }
1160
1161 /*
1162  * fixup_free is called when:
1163  * - an active object is freed
1164  */
1165 static bool __init fixup_free(void *addr, enum debug_obj_state state)
1166 {
1167         struct self_test *obj = addr;
1168
1169         switch (state) {
1170         case ODEBUG_STATE_ACTIVE:
1171                 debug_object_deactivate(obj, &descr_type_test);
1172                 debug_object_free(obj, &descr_type_test);
1173                 return true;
1174         default:
1175                 return false;
1176         }
1177 }
1178
1179 static int __init
1180 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1181 {
1182         struct debug_bucket *db;
1183         struct debug_obj *obj;
1184         unsigned long flags;
1185         int res = -EINVAL;
1186
1187         db = get_bucket((unsigned long) addr);
1188
1189         raw_spin_lock_irqsave(&db->lock, flags);
1190
1191         obj = lookup_object(addr, db);
1192         if (!obj && state != ODEBUG_STATE_NONE) {
1193                 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1194                 goto out;
1195         }
1196         if (obj && obj->state != state) {
1197                 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1198                        obj->state, state);
1199                 goto out;
1200         }
1201         if (fixups != debug_objects_fixups) {
1202                 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1203                        fixups, debug_objects_fixups);
1204                 goto out;
1205         }
1206         if (warnings != debug_objects_warnings) {
1207                 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1208                        warnings, debug_objects_warnings);
1209                 goto out;
1210         }
1211         res = 0;
1212 out:
1213         raw_spin_unlock_irqrestore(&db->lock, flags);
1214         if (res)
1215                 debug_objects_enabled = 0;
1216         return res;
1217 }
1218
1219 static __initconst const struct debug_obj_descr descr_type_test = {
1220         .name                   = "selftest",
1221         .is_static_object       = is_static_object,
1222         .fixup_init             = fixup_init,
1223         .fixup_activate         = fixup_activate,
1224         .fixup_destroy          = fixup_destroy,
1225         .fixup_free             = fixup_free,
1226 };
1227
1228 static __initdata struct self_test obj = { .static_init = 0 };
1229
1230 static void __init debug_objects_selftest(void)
1231 {
1232         int fixups, oldfixups, warnings, oldwarnings;
1233         unsigned long flags;
1234
1235         local_irq_save(flags);
1236
1237         fixups = oldfixups = debug_objects_fixups;
1238         warnings = oldwarnings = debug_objects_warnings;
1239         descr_test = &descr_type_test;
1240
1241         debug_object_init(&obj, &descr_type_test);
1242         if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1243                 goto out;
1244         debug_object_activate(&obj, &descr_type_test);
1245         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1246                 goto out;
1247         debug_object_activate(&obj, &descr_type_test);
1248         if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1249                 goto out;
1250         debug_object_deactivate(&obj, &descr_type_test);
1251         if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1252                 goto out;
1253         debug_object_destroy(&obj, &descr_type_test);
1254         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1255                 goto out;
1256         debug_object_init(&obj, &descr_type_test);
1257         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1258                 goto out;
1259         debug_object_activate(&obj, &descr_type_test);
1260         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1261                 goto out;
1262         debug_object_deactivate(&obj, &descr_type_test);
1263         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1264                 goto out;
1265         debug_object_free(&obj, &descr_type_test);
1266         if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1267                 goto out;
1268
1269         obj.static_init = 1;
1270         debug_object_activate(&obj, &descr_type_test);
1271         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1272                 goto out;
1273         debug_object_init(&obj, &descr_type_test);
1274         if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1275                 goto out;
1276         debug_object_free(&obj, &descr_type_test);
1277         if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1278                 goto out;
1279
1280 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1281         debug_object_init(&obj, &descr_type_test);
1282         if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1283                 goto out;
1284         debug_object_activate(&obj, &descr_type_test);
1285         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1286                 goto out;
1287         __debug_check_no_obj_freed(&obj, sizeof(obj));
1288         if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1289                 goto out;
1290 #endif
1291         pr_info("selftest passed\n");
1292
1293 out:
1294         debug_objects_fixups = oldfixups;
1295         debug_objects_warnings = oldwarnings;
1296         descr_test = NULL;
1297
1298         local_irq_restore(flags);
1299 }
1300 #else
1301 static inline void debug_objects_selftest(void) { }
1302 #endif
1303
1304 /*
1305  * Called during early boot to initialize the hash buckets and link
1306  * the static object pool objects into the pool list. After this call
1307  * the object tracker is fully operational.
1308  */
1309 void __init debug_objects_early_init(void)
1310 {
1311         int i;
1312
1313         for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1314                 raw_spin_lock_init(&obj_hash[i].lock);
1315
1316         for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1317                 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1318 }
1319
1320 /*
1321  * Convert the statically allocated objects to dynamic ones:
1322  */
1323 static int __init debug_objects_replace_static_objects(void)
1324 {
1325         struct debug_bucket *db = obj_hash;
1326         struct hlist_node *tmp;
1327         struct debug_obj *obj, *new;
1328         HLIST_HEAD(objects);
1329         int i, cnt = 0;
1330
1331         for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1332                 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1333                 if (!obj)
1334                         goto free;
1335                 hlist_add_head(&obj->node, &objects);
1336         }
1337
1338         debug_objects_allocated += i;
1339
1340         /*
1341          * debug_objects_mem_init() is now called early, when only one CPU is up
1342          * and interrupts have been disabled, so it is safe to replace the
1343          * active object references.
1344          */
1345
1346         /* Remove the statically allocated objects from the pool */
1347         hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1348                 hlist_del(&obj->node);
1349         /* Move the allocated objects to the pool */
1350         hlist_move_list(&objects, &obj_pool);
1351
1352         /* Replace the active object references */
1353         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1354                 hlist_move_list(&db->list, &objects);
1355
1356                 hlist_for_each_entry(obj, &objects, node) {
1357                         new = hlist_entry(obj_pool.first, typeof(*obj), node);
1358                         hlist_del(&new->node);
1359                         /* copy object data */
1360                         *new = *obj;
1361                         hlist_add_head(&new->node, &db->list);
1362                         cnt++;
1363                 }
1364         }
1365
1366         pr_debug("%d of %d active objects replaced\n",
1367                  cnt, obj_pool_used);
1368         return 0;
1369 free:
1370         hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1371                 hlist_del(&obj->node);
1372                 kmem_cache_free(obj_cache, obj);
1373         }
1374         return -ENOMEM;
1375 }
1376
1377 /*
1378  * Called after the kmem_caches are functional to set up a dedicated
1379  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1380  * prevents the debug code from being called on kmem_cache_free() for
1381  * the debug tracker objects, which would cause recursive calls.
1382  */
1383 void __init debug_objects_mem_init(void)
1384 {
1385         int cpu, extras;
1386
1387         if (!debug_objects_enabled)
1388                 return;
1389
1390         /*
1391          * Initialize the percpu object pools
1392          *
1393          * Initialization is not strictly necessary, but is done for
1394          * completeness.
1395          */
1396         for_each_possible_cpu(cpu)
1397                 INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
1398
1399         obj_cache = kmem_cache_create("debug_objects_cache",
1400                                       sizeof (struct debug_obj), 0,
1401                                       SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1402                                       NULL);
1403
1404         if (!obj_cache || debug_objects_replace_static_objects()) {
1405                 debug_objects_enabled = 0;
1406                 kmem_cache_destroy(obj_cache);
1407                 pr_warn("out of memory.\n");
1408                 return;
1409         } else
1410                 debug_objects_selftest();
1411
1412 #ifdef CONFIG_HOTPLUG_CPU
1413         cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1414                                         object_cpu_offline);
1415 #endif
1416
1417         /*
1418          * Increase the thresholds for allocating and freeing objects
1419          * according to the number of possible CPUs available in the system.
1420          */
1421         extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1422         debug_objects_pool_size += extras;
1423         debug_objects_pool_min_level += extras;
1424 }
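/*
 * Worked example of the scaling above, assuming 8 possible CPUs (the actual
 * number is system dependent): extras = 8 * ODEBUG_BATCH_SIZE = 128, so
 * debug_objects_pool_size becomes 1024 + 128 = 1152 and
 * debug_objects_pool_min_level becomes 256 + 128 = 384.
 */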