/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS        14
#define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE        1024
#define ODEBUG_POOL_MIN_LEVEL   256

#define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))
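
/*
 * A "chunk" is one page (ODEBUG_CHUNK_SHIFT == PAGE_SHIFT). Tracked
 * objects are hashed by the chunk their address falls into, so all
 * objects within one page share a hash bucket.
 */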

struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
};

static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_used;
static int                      obj_pool_max_used;
static struct kmem_cache        *obj_cache;

static int                      debug_objects_maxchain __read_mostly;
static int                      debug_objects_fixups __read_mostly;
static int                      debug_objects_warnings __read_mostly;
static int                      debug_objects_enabled __read_mostly
                                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int                      debug_objects_pool_size __read_mostly
                                = ODEBUG_POOL_SIZE;
static int                      debug_objects_pool_min_level __read_mostly
                                = ODEBUG_POOL_MIN_LEVEL;
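
/*
 * While the selftest runs, descr_test points to the selftest descriptor;
 * debug_print_object() compares against it to skip the WARN splat for
 * errors the selftest triggers on purpose (the warning counter is still
 * incremented).
 */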
static struct debug_obj_descr   *descr_test  __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int                      debug_objects_allocated;
static int                      debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = 0;
        return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
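
/*
 * Example: booting with "debug_objects" on the kernel command line
 * enables the tracker even if CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT is
 * off; "no_debug_objects" disables it.
 */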

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE]             = "none",
        [ODEBUG_STATE_INIT]             = "initialized",
        [ODEBUG_STATE_INACTIVE]         = "inactive",
        [ODEBUG_STATE_ACTIVE]           = "active",
        [ODEBUG_STATE_DESTROYED]        = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
};

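/*
 * Refill the global object pool up to debug_objects_pool_min_level.
 * The allocation is deliberately best effort: GFP_ATOMIC because this
 * may run in atomic context, __GFP_NORETRY | __GFP_NOWARN because a
 * failed refill is harmless - the pool simply stays below the fill
 * level until the next attempt.
 */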
static void fill_pool(void)
{
        gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
        struct debug_obj *new;
        unsigned long flags;

        if (likely(obj_pool_free >= debug_objects_pool_min_level))
                return;

        if (unlikely(!obj_cache))
                return;

        while (obj_pool_free < debug_objects_pool_min_level) {

                new = kmem_cache_zalloc(obj_cache, gfp);
                if (!new)
                        return;

                raw_spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&new->node, &obj_pool);
                debug_objects_allocated++;
                obj_pool_free++;
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
}

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

/*
 * Allocate a new object. If the pool is empty, the caller switches the
 * debugger off. Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
        struct debug_obj *obj = NULL;

        raw_spin_lock(&pool_lock);
        if (obj_pool.first) {
                obj         = hlist_entry(obj_pool.first, typeof(*obj), node);

                obj->object = addr;
                obj->descr  = descr;
                obj->state  = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_del(&obj->node);

                hlist_add_head(&obj->node, &b->list);

                obj_pool_used++;
                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                obj_pool_free--;
                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        raw_spin_unlock(&pool_lock);

        return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy. We also free
 * the objects in a batch of 4 for each lock/unlock cycle.
 */
#define ODEBUG_FREE_BATCH       4

static void free_obj_work(struct work_struct *work)
{
        struct debug_obj *objs[ODEBUG_FREE_BATCH];
        unsigned long flags;
        int i;

        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;
        while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
                for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
                        objs[i] = hlist_entry(obj_pool.first,
                                              typeof(*objs[0]), node);
                        hlist_del(&objs[i]->node);
                }

                obj_pool_free -= ODEBUG_FREE_BATCH;
                debug_objects_freed += ODEBUG_FREE_BATCH;
                /*
                 * We release pool_lock across kmem_cache_free() to
                 * avoid contention on pool_lock.
                 */
                raw_spin_unlock_irqrestore(&pool_lock, flags);
                for (i = 0; i < ODEBUG_FREE_BATCH; i++)
                        kmem_cache_free(obj_cache, objs[i]);
                if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                        return;
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        unsigned long flags;
        int sched = 0;

        raw_spin_lock_irqsave(&pool_lock, flags);
        /*
         * schedule work when the pool is filled and the cache is
         * initialized:
         */
        if (obj_pool_free > debug_objects_pool_size && obj_cache)
                sched = 1;
        hlist_add_head(&obj->node, &obj_pool);
        obj_pool_free++;
        obj_pool_used--;
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        if (sched)
                schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That probably means we have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        pr_warn("Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_move_list(&db->list, &freelist);
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
        }
}

/*
 * We hash the chunk (page) number of the address. That way a freed
 * memory range can be checked by walking only the affected buckets.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        struct debug_obj_descr *descr = obj->descr;
        static int limit;

        /*
         * Don't report if the lookup/allocation by the current thread
         * failed because alloc_object()/debug_objects_oom() in a
         * concurrent thread turned off debug_objects_enabled and cleared
         * the hash buckets.
         */
        if (!debug_objects_enabled)
                return;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
                   void * addr, enum debug_obj_state state)
{
        if (fixup && fixup(addr, state)) {
                debug_objects_fixups++;
                return true;
        }
        return false;
}

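/*
 * Warn when the on-stack annotation does not match reality: objects that
 * live on the current task's stack must be initialized with
 * debug_object_init_on_stack(), everything else with debug_object_init().
 * The static limit keeps the number of reports bounded.
 */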
static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
                         task_stack_page(current));
        else
                pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
                         task_stack_page(current));

        WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                obj = alloc_object(addr, db, descr);
                if (!obj) {
                        debug_objects_enabled = 0;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_objects_oom();
                        return;
                }
                debug_object_is_on_stack(addr, onstack);
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "init");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                debug_print_object(obj, "init");
                break;
        default:
                break;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
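
/*
 * Typical usage sketch (hypothetical "foo" subsystem, not part of this
 * file): the user declares a struct debug_obj_descr and brackets the
 * object's lifetime with the debug_object_* calls, roughly:
 *
 *      static struct debug_obj_descr foo_debug_descr = {
 *              .name = "foo",
 *      };
 *
 *      foo_init(f)    -> debug_object_init(f, &foo_debug_descr);
 *      foo_start(f)   -> debug_object_activate(f, &foo_debug_descr);
 *      foo_stop(f)    -> debug_object_deactivate(f, &foo_debug_descr);
 *      foo_destroy(f) -> debug_object_free(f, &foo_debug_descr);
 *
 * The optional is_static_object() and fixup_*() callbacks in the
 * descriptor let the user detect static objects and repair detected
 * misuse; see the selftest descriptor below (CONFIG_DEBUG_OBJECTS_SELFTEST)
 * for an example set.
 */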

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *                              initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int ret;
        struct debug_obj o = { .object = addr,
                               .state = ODEBUG_STATE_NOTAVAILABLE,
                               .descr = descr };

        if (!debug_objects_enabled)
                return 0;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        ret = 0;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        debug_print_object(obj, "activate");
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        ret = debug_object_fixup(descr->fixup_activate, addr, state);
                        return ret ? 0 : -EINVAL;

                case ODEBUG_STATE_DESTROYED:
                        debug_print_object(obj, "activate");
                        ret = -EINVAL;
                        break;
                default:
                        ret = 0;
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
                return ret;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        /*
         * This happens when a static object is activated. We let the
         * type specific code confirm whether this really is a static
         * object. If so, we just make sure that it is tracked in the
         * object tracker. If not, this must be a bug, so we try to
         * fix it up.
         */
        if (descr->is_static_object && descr->is_static_object(addr)) {
                /* track this static object */
                debug_object_init(addr, descr);
                debug_object_activate(addr, descr);
        } else {
                debug_print_object(&o, "activate");
                ret = debug_object_fixup(descr->fixup_activate, addr,
                                        ODEBUG_STATE_NOTAVAILABLE);
                return ret ? 0 : -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
                                debug_print_object(obj, "deactivate");
                        break;

                case ODEBUG_STATE_DESTROYED:
                        debug_print_object(obj, "deactivate");
                        break;
                default:
                        break;
                }
        } else {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "destroy");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                debug_print_object(obj, "destroy");
                break;
        default:
                break;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "free");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                raw_spin_unlock_irqrestore(&db->lock, flags);
                /*
                 * Maybe the object is static; let the type specific code
                 * confirm. If it is, track it; otherwise invoke the fixup.
                 */
                if (descr->is_static_object && descr->is_static_object(addr)) {
                        /* Track this static object */
                        debug_object_init(addr, descr);
                } else {
                        debug_print_object(&o, "assert_init");
                        debug_object_fixup(descr->fixup_assert_init, addr,
                                           ODEBUG_STATE_NOTAVAILABLE);
                }
                return;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * @expect:     expected state
 * @next:       state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
                                debug_print_object(obj, "active_state");
                        break;

                default:
                        debug_print_object(obj, "active_state");
                        break;
                }
        } else {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "active_state");
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
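
/*
 * Note: ->astate is an additional, caller defined state that is only
 * used while an object is ODEBUG_STATE_ACTIVE. debug_object_active_state()
 * moves it from @expect to @next and complains about any mismatch; RCU,
 * for instance, uses this to catch a double call_rcu() on the same
 * rcu_head.
 */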

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        int cnt;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
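        /*
         * Walk every page-sized chunk that overlaps the freed range
         * [saddr, eaddr): paddr is the first chunk-aligned address,
         * chunks the rounded-up number of chunks to scan.
         */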
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;

        for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                debug_print_object(obj, "free");
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                hlist_add_head(&obj->node, &freelist);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
        seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free     :%d\n", obj_pool_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
        .open           = debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir, *dbgstats;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);
        if (!dbgdir)
                return -ENOMEM;

        dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
                                       &debug_stats_fops);
        if (!dbgstats)
                goto err;

        return 0;

err:
        debugfs_remove(dbgdir);

        return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long   dummy1[6];
        int             static_init;
        unsigned long   dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

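/*
 * is_static_object() simply reports ->static_init, which lets the
 * selftest below exercise both the regular and the static object
 * activation paths by flipping that flag.
 */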
static bool __init is_static_object(void *addr)
{
        struct self_test *obj = addr;

        return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                return true;
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return true;

        default:
                return false;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                       obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                       fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                       warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
        .name                   = "selftest",
        .is_static_object       = is_static_object,
        .fixup_init             = fixup_init,
        .fixup_activate         = fixup_activate,
        .fixup_destroy          = fixup_destroy,
        .fixup_free             = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        pr_info("selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
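
/*
 * Note: until debug_objects_mem_init() has run, all tracked objects come
 * from the static pool above; fill_pool() does nothing while obj_cache is
 * still NULL.
 */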

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;

        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
                hlist_add_head(&obj->node, &objects);
        }

        /*
         * When debug_objects_mem_init() is called we know that only
         * one CPU is up, so disabling interrupts is enough
         * protection. This avoids the lockdep hell of lock ordering.
         */
        local_irq_disable();

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                        cnt++;
                }
        }
        local_irq_enable();

        pr_debug("%d of %d active objects replaced\n",
                 cnt, obj_pool_used);
        return 0;
free:
        hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
        if (!debug_objects_enabled)
                return;

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof (struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
                                      NULL);

        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
                if (obj_cache)
                        kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
        } else
                debug_objects_selftest();

        /*
         * Increase the thresholds for allocating and freeing objects
         * according to the number of possible CPUs available in the system.
         */
        debug_objects_pool_size += num_possible_cpus() * 32;
        debug_objects_pool_min_level += num_possible_cpus() * 4;
}