/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS        14
#define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE        1024
#define ODEBUG_POOL_MIN_LEVEL   256
#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_BATCH_SIZE       16

#define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))
/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation, i.e.
 * at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX    1024
#define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)

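/*
 * Worked example for the limits above (illustrative, assuming the common
 * HZ=250 configuration; any HZ >= 10 yields the same cap):
 *
 *      ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies = 100ms
 *      => at most 10 work invocations per second
 *      => at most 10 * ODEBUG_FREE_WORK_MAX = 10240 objects freed per second
 */
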
struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
        struct hlist_head       free_objs;
        int                     obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_used;
static int                      obj_pool_max_used;
static bool                     obj_freeing;
/* The number of objs on the global free list */
static int                      obj_nr_tofree;

static int                      debug_objects_maxchain __read_mostly;
static int __maybe_unused       debug_objects_maxchecked __read_mostly;
static int                      debug_objects_fixups __read_mostly;
static int                      debug_objects_warnings __read_mostly;
static int                      debug_objects_enabled __read_mostly
                                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int                      debug_objects_pool_size __read_mostly
                                = ODEBUG_POOL_SIZE;
static int                      debug_objects_pool_min_level __read_mostly
                                = ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr   *descr_test  __read_mostly;
static struct kmem_cache        *obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int                      debug_objects_allocated;
static int                      debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = 0;
        return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE]             = "none",
        [ODEBUG_STATE_INIT]             = "initialized",
        [ODEBUG_STATE_INACTIVE]         = "inactive",
        [ODEBUG_STATE_ACTIVE]           = "active",
        [ODEBUG_STATE_DESTROYED]        = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
};
static void fill_pool(void)
{
        gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
        struct debug_obj *obj;
        unsigned long flags;

        if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
                return;

        /*
         * Reuse objs from the global free list; they will be reinitialized
         * when allocating.
         *
         * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
         * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
         * sections.
         */
        while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
                raw_spin_lock_irqsave(&pool_lock, flags);
                /*
                 * Recheck with the lock held as the worker thread might have
                 * won the race and freed the global free list already.
                 */
                while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
                        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                        hlist_del(&obj->node);
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
                        hlist_add_head(&obj->node, &obj_pool);
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }

        if (unlikely(!obj_cache))
                return;

        while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
                struct debug_obj *new[ODEBUG_BATCH_SIZE];
                int cnt;

                for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
                        new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
                        if (!new[cnt])
                                break;
                }
                if (!cnt)
                        return;

                raw_spin_lock_irqsave(&pool_lock, flags);
                while (cnt) {
                        hlist_add_head(&new[--cnt]->node, &obj_pool);
                        debug_objects_allocated++;
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
}

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
        struct debug_obj *obj = NULL;

        if (list->first) {
                obj = hlist_entry(list->first, typeof(*obj), node);
                hlist_del(&obj->node);
        }

        return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
        struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        struct debug_obj *obj;

        if (likely(obj_cache)) {
                obj = __alloc_object(&percpu_pool->free_objs);
                if (obj) {
                        percpu_pool->obj_free--;
                        goto init_obj;
                }
        }

        raw_spin_lock(&pool_lock);
        obj = __alloc_object(&obj_pool);
        if (obj) {
                obj_pool_used++;
                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

                /*
                 * Looking ahead, allocate one batch of debug objects and
                 * put them into the percpu free pool.
                 */
                if (likely(obj_cache)) {
                        int i;

                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                struct debug_obj *obj2;

                                obj2 = __alloc_object(&obj_pool);
                                if (!obj2)
                                        break;
                                hlist_add_head(&obj2->node,
                                               &percpu_pool->free_objs);
                                percpu_pool->obj_free++;
                                obj_pool_used++;
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                        }
                }

                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        raw_spin_unlock(&pool_lock);

init_obj:
        if (obj) {
                obj->object = addr;
                obj->descr  = descr;
                obj->state  = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_add_head(&obj->node, &b->list);
        }
        return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;
        HLIST_HEAD(tofree);

        WRITE_ONCE(obj_freeing, false);
        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;

        if (obj_pool_free >= debug_objects_pool_size)
                goto free_objs;

        /*
         * The objs on the pool list might be allocated before the work is
         * run, so recheck whether the pool list is full. If not, refill
         * the pool list from the global free list. As it is likely that a
         * workload may be gearing up to use more and more objects, don't
         * free any of them until the next round.
         */
        while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
                obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &obj_pool);
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        return;

free_objs:
        /*
         * Pool list is already full and there are still objs on the free
         * list. Move remaining free objs to a temporary list to free the
         * memory outside the pool_lock held region.
         */
        if (obj_nr_tofree) {
                hlist_move_list(&obj_to_free, &tofree);
                debug_objects_freed += obj_nr_tofree;
                WRITE_ONCE(obj_nr_tofree, 0);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
}

static void __free_object(struct debug_obj *obj)
{
        struct debug_obj *objs[ODEBUG_BATCH_SIZE];
        struct debug_percpu_free *percpu_pool;
        int lookahead_count = 0;
        unsigned long flags;
        bool work;

        local_irq_save(flags);
        if (!obj_cache)
                goto free_to_obj_pool;

        /*
         * Try to free it into the percpu pool first.
         */
        percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
                hlist_add_head(&obj->node, &percpu_pool->free_objs);
                percpu_pool->obj_free++;
                local_irq_restore(flags);
                return;
        }

        /*
         * As the percpu pool is full, look ahead and pull out a batch
         * of objects from the percpu pool and free them as well.
         */
        for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
                objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
                if (!objs[lookahead_count])
                        break;
                percpu_pool->obj_free--;
        }

free_to_obj_pool:
        raw_spin_lock(&pool_lock);
        work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
               (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
        obj_pool_used--;

        if (work) {
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                hlist_add_head(&obj->node, &obj_to_free);
                if (lookahead_count) {
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_to_free);
                        }
                }

                if ((obj_pool_free > debug_objects_pool_size) &&
                    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
                        int i;

                        /*
                         * Free one more batch of objects from obj_pool.
                         */
                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                obj = __alloc_object(&obj_pool);
                                hlist_add_head(&obj->node, &obj_to_free);
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                        }
                }
        } else {
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                hlist_add_head(&obj->node, &obj_pool);
                if (lookahead_count) {
                        WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_pool);
                        }
                }
        }
        raw_spin_unlock(&pool_lock);
        local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        __free_object(obj);
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        pr_warn("Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_move_list(&db->list, &freelist);
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
        }
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}

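/*
 * Worked example (illustrative only): with ODEBUG_CHUNK_SHIFT == PAGE_SHIFT
 * and 4k pages, all addresses within one chunk map to the same bucket:
 *
 *      get_bucket(0x1000) == get_bucket(0x1ff8)    same 4k chunk
 *      get_bucket(0x1000) vs get_bucket(0x2000)    different chunks, hence
 *                                                  (usually) different buckets
 *
 * This is what lets __debug_check_no_obj_freed() find every tracked object
 * inside a freed memory range by scanning only the buckets of the chunks
 * that overlap that range.
 */
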
static void debug_print_object(struct debug_obj *obj, char *msg)
{
        struct debug_obj_descr *descr = obj->descr;
        static int limit;

        /*
         * Don't report if lookup_object_or_alloc() by the current thread
         * failed because lookup_object_or_alloc()/debug_objects_oom() by a
         * concurrent thread turned off debug_objects_enabled and cleared
         * the hash buckets.
         */
        if (!debug_objects_enabled)
                return;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
                   void *addr, enum debug_obj_state state)
{
        if (fixup && fixup(addr, state)) {
                debug_objects_fixups++;
                return true;
        }
        return false;
}

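/*
 * Sketch of a typical fixup callback (illustrative; loosely modeled on the
 * timer code, the names below are not defined in this file). A fixup_init
 * handler usually stops the still-active object and re-initializes it so
 * that debugging can continue:
 *
 *      static bool my_timer_fixup_init(void *addr, enum debug_obj_state state)
 *      {
 *              struct timer_list *timer = addr;
 *
 *              switch (state) {
 *              case ODEBUG_STATE_ACTIVE:
 *                      del_timer_sync(timer);
 *                      debug_object_init(timer, &my_timer_debug_descr);
 *                      return true;
 *              default:
 *                      return false;
 *              }
 *      }
 */
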
static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
                         task_stack_page(current));
        else
                pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
                         task_stack_page(current));

        WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
                                                struct debug_obj_descr *descr,
                                                bool onstack, bool alloc_ifstatic)
{
        struct debug_obj *obj = lookup_object(addr, b);
        enum debug_obj_state state = ODEBUG_STATE_NONE;

        if (likely(obj))
                return obj;

        /*
         * debug_object_init() unconditionally allocates untracked
         * objects. It does not matter whether it is a static object or
         * not.
         *
         * debug_object_assert_init() and debug_object_activate() allow
         * allocation only if the descriptor callback confirms that the
         * object is static and considered initialized. For non-static
         * objects the allocation needs to be done from the fixup callback.
         */
        if (unlikely(alloc_ifstatic)) {
                if (!descr->is_static_object || !descr->is_static_object(addr))
                        return ERR_PTR(-ENOENT);
                /* Statically allocated objects are considered initialized */
                state = ODEBUG_STATE_INIT;
        }

        obj = alloc_object(addr, b, descr);
        if (likely(obj)) {
                obj->state = state;
                debug_object_is_on_stack(addr, onstack);
                return obj;
        }

        /* Out of memory. Do the cleanup outside of the locked region */
        debug_objects_enabled = 0;
        return NULL;
}

static void debug_objects_fill_pool(void)
{
        /*
         * On RT enabled kernels the pool refill must happen in preemptible
         * context:
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
                fill_pool();
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
        if (unlikely(!obj)) {
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_objects_oom();
                return;
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                return;
        default:
                break;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

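/*
 * Usage sketch (illustrative; the struct and descriptor names below are
 * made up and not part of this file):
 *
 *      struct my_obj {
 *              int value;
 *      };
 *
 *      static struct debug_obj_descr my_obj_debug_descr = {
 *              .name = "my_obj",
 *      };
 *
 *      void my_obj_setup(struct my_obj *obj)
 *      {
 *              debug_object_init(obj, &my_obj_debug_descr);
 *      }
 *
 * After the call the object is tracked and in ODEBUG_STATE_INIT; a second
 * debug_object_init() on an ACTIVE object would trigger fixup_init instead.
 */
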
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *                              initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

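/*
 * On-stack sketch (illustrative, reusing the made-up names from the sketch
 * above): stack objects must use the _on_stack variant and must be removed
 * from tracking before the function returns:
 *
 *      void my_func(void)
 *      {
 *              struct my_obj obj;
 *
 *              debug_object_init_on_stack(&obj, &my_obj_debug_descr);
 *              ...
 *              debug_object_free(&obj, &my_obj_debug_descr);
 *      }
 *
 * Mixing up the two init variants is caught by debug_object_is_on_stack().
 */
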
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int ret;

        if (!debug_objects_enabled)
                return 0;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object_or_alloc(addr, db, descr, false, true);
        if (likely(!IS_ERR_OR_NULL(obj))) {
                bool print_object = false;

                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        ret = 0;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_print_object(obj, "activate");
                        ret = debug_object_fixup(descr->fixup_activate, addr, state);
                        return ret ? 0 : -EINVAL;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        ret = -EINVAL;
                        break;
                default:
                        ret = 0;
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
                if (print_object)
                        debug_print_object(obj, "activate");
                return ret;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);

        /* If NULL the allocation has hit OOM */
        if (!obj) {
                debug_objects_oom();
                return 0;
        }

        /* Object is neither static nor tracked. It's not initialized */
        debug_print_object(&o, "activate");
        ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
        return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

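/*
 * Activation sketch (illustrative): callers can use the return value to
 * bail out when the object is in an invalid state:
 *
 *      if (debug_object_activate(obj, &my_obj_debug_descr))
 *              return -EINVAL;  // object was in a bad state
 *
 * For statically allocated objects, the optional .is_static_object()
 * descriptor callback lets this path treat an untracked static object as
 * initialized instead of reporting it as uninitialized.
 */
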
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
                                print_object = true;
                        break;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        break;
                default:
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        } else if (print_object) {
                debug_print_object(obj, "deactivate");
        }
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "destroy");
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                print_object = true;
                break;
        default:
                break;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (print_object)
                debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "free");
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);
        obj = lookup_object_or_alloc(addr, db, descr, false, true);
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (likely(!IS_ERR_OR_NULL(obj)))
                return;

        /* If NULL the allocation has hit OOM */
        if (!obj) {
                debug_objects_oom();
                return;
        }

        /* Object is neither tracked nor static. It's not initialized. */
        debug_print_object(&o, "assert_init");
        debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

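/*
 * Static-object sketch (illustrative; names are made up): a descriptor can
 * report statically initialized objects so that assert_init/activate on an
 * untracked static object succeeds without a prior debug_object_init():
 *
 *      static bool my_obj_is_static(void *addr)
 *      {
 *              struct my_obj *obj = addr;
 *
 *              return obj->flags & MY_OBJ_STATIC_FLAG;  // made-up flag
 *      }
 *
 *      static struct debug_obj_descr my_obj_debug_descr = {
 *              .name                   = "my_obj",
 *              .is_static_object       = my_obj_is_static,
 *      };
 */
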
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * @expect:     expected state
 * @next:       state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
                                print_object = true;
                        break;

                default:
                        print_object = true;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "active_state");
        } else if (print_object) {
                debug_print_object(obj, "active_state");
        }
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

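/*
 * Active-state sketch (illustrative): @astate gives users one extra level
 * of tracking on top of ODEBUG_STATE_ACTIVE; RCU, for instance, uses it to
 * track queued vs. ready callback heads. A generic two-phase pattern:
 *
 *      #define MY_OBJ_READY    0
 *      #define MY_OBJ_QUEUED   1
 *
 *      debug_object_active_state(obj, &my_obj_debug_descr,
 *                                MY_OBJ_READY, MY_OBJ_QUEUED);
 *      // ... object sits on a queue ...
 *      debug_object_active_state(obj, &my_obj_debug_descr,
 *                                MY_OBJ_QUEUED, MY_OBJ_READY);
 *
 * A mismatch between @expect and the recorded @astate is reported, and
 * debug_object_deactivate() complains if @astate has not returned to zero.
 */
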
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        int cnt, objs_checked = 0;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;

        for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_print_object(obj, "free");
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                __free_object(obj);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;

                objs_checked += cnt;
        }

        if (objs_checked > debug_objects_maxchecked)
                debug_objects_maxchecked = objs_checked;

        /* Schedule work to actually kmem_cache_free() objects */
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif

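/*
 * Note (assumption based on the callers, which are not part of this file):
 * this hook is wired into the allocator free paths, so freeing memory that
 * still holds an active tracked object is caught even without an explicit
 * debug_object_free() call, e.g.:
 *
 *      struct my_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *      debug_object_init(obj, &my_obj_debug_descr);
 *      debug_object_activate(obj, &my_obj_debug_descr);
 *      kfree(obj);     // ODEBUG reports freeing an active object
 */
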
#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        int cpu, obj_percpu_free = 0;

        for_each_possible_cpu(cpu)
                obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

        seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
        seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
        seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
        seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
        .open           = debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);

        debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

        return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long   dummy1[6];
        int             static_init;
        unsigned long   dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
        struct self_test *obj = addr;

        return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                return true;
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return true;

        default:
                return false;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                       obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                       fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                       warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
        .name                   = "selftest",
        .is_static_object       = is_static_object,
        .fixup_init             = fixup_init,
        .fixup_activate         = fixup_activate,
        .fixup_destroy          = fixup_destroy,
        .fixup_free             = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        pr_info("selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;

        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
                hlist_add_head(&obj->node, &objects);
        }

        /*
         * debug_objects_mem_init() is called early, when only one CPU is up
         * and interrupts are disabled, so it is safe to replace the active
         * object references.
         */

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                        cnt++;
                }
        }

        pr_debug("%d of %d active objects replaced\n",
                 cnt, obj_pool_used);
        return 0;
free:
        hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool with the SLAB_DEBUG_OBJECTS flag set. This flag prevents
 * the debug code from being called on kmem_cache_free() for the debug
 * tracker objects themselves, which would lead to recursive calls.
 */
void __init debug_objects_mem_init(void)
{
        int cpu, extras;

        if (!debug_objects_enabled)
                return;

        /*
         * Initialize the percpu object pools
         *
         * Initialization is not strictly necessary, but is done for
         * completeness.
         */
        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof(struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
                                      NULL);

        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
                kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
        } else {
                debug_objects_selftest();
        }

        /*
         * Increase the thresholds for allocating and freeing objects
         * according to the number of possible CPUs available in the system.
         */
        extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
        debug_objects_pool_size += extras;
        debug_objects_pool_min_level += extras;
}