/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         *     that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}
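
/*
 * Usage sketch: take a stable on-stack copy of a map whose owner may be
 * freed while lockdep is still reporting on it. This mirrors what the
 * workqueue code does in process_one_work(); 'work' is illustrative here:
 *
 *	struct lockdep_map lockdep_map;
 *
 *	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 */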

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct lock_class               *links_to;
        const struct lock_trace         *trace;
        u16                             distance;
        /* bitmap of different dependencies from head to this */
        u8                              dep;
        /* used by BFS to record whether "prev -> this" only has -(*R)-> */
        u8                              only_xr;

        /*
         * The parent field is used to implement breadth-first search, and the
         * bit 0 is reused to indicate if the lock has been accessed in BFS.
         */
        struct lock_list                *parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
        /* see BUILD_BUG_ON()s in add_chain_cache() */
        unsigned int                    irq_context :  2,
                                        depth       :  6,
                                        base        : 24;
        /* 4 byte hole */
        struct hlist_node               entry;
        u64                             chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS           13
#define MAX_LOCKDEP_KEYS                (1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY               -1

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64                             prev_chain_key;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
        struct lockdep_map              *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
#endif
        /*
         * class_idx is zero-indexed; it points to the element in
         * lock_classes this held lock instance belongs to. class_idx is in
         * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
         */
        unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;                                         /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
};
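
/*
 * Conceptually, each newly held lock folds its class into the running
 * hash. This is a sketch of the idea only - the real mixing function is
 * iterate_chain_key() in kernel/locking/lockdep.c:
 *
 *	chain_key = hash_mix(prev_chain_key, lock_id);
 */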

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS  16
#define LOCKDEP_OFF             (1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK  (LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()                                   \
do {                                                    \
        current->lockdep_recursion += LOCKDEP_OFF;      \
} while (0)

#define lockdep_on()                                    \
do {                                                    \
        current->lockdep_recursion -= LOCKDEP_OFF;      \
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
        struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
                       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
        lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass, u8 inner)
{
        lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass)
{
        lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)                            \
        lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,   \
                              (lock)->dep_map.wait_type_inner,  \
                              (lock)->dep_map.wait_type_outer,  \
                              (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)             \
        lockdep_init_map_type(&(lock)->dep_map, name, key, 0,   \
                              (lock)->dep_map.wait_type_inner,  \
                              (lock)->dep_map.wait_type_outer,  \
                              (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)          \
        lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \
                              (lock)->dep_map.wait_type_inner,  \
                              (lock)->dep_map.wait_type_outer,  \
                              (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)                                 \
        lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
                              (lock)->dep_map.wait_type_inner,          \
                              (lock)->dep_map.wait_type_outer,          \
                              (lock)->dep_map.lock_type)

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
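
/*
 * Usage sketch (hypothetical; 'dev' and the keys are illustrative): when
 * one init helper creates locks that play genuinely different dependency
 * roles, split them into distinct classes:
 *
 *	static struct lock_class_key dev_tx_key, dev_rx_key;
 *
 *	spin_lock_init(&dev->tx_lock);
 *	lockdep_set_class(&dev->tx_lock, &dev_tx_key);
 *	spin_lock_init(&dev->rx_lock);
 *	lockdep_set_class(&dev->rx_lock, &dev_rx_key);
 */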

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
        return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)           lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)   lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)  do {                            \
                WARN_ON(debug_locks && !lockdep_is_held(l));    \
        } while (0)

#define lockdep_assert_held_write(l)    do {                            \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));    \
        } while (0)

#define lockdep_assert_held_read(l)     do {                            \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));    \
        } while (0)

#define lockdep_assert_held_once(l)     do {                            \
                WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
        } while (0)

#define lockdep_assert_none_held_once() do {                            \
                WARN_ON_ONCE(debug_locks && current->lockdep_depth);    \
        } while (0)
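
/*
 * Example: a helper that must only run under its object's lock can
 * document and enforce that (a sketch; 'foo' is hypothetical):
 *
 *	static void foo_update_state(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		...
 *	}
 */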

#define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)     lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
# define lock_release(l, i)                     do { } while (0)
# define lock_downgrade(l, i)                   do { } while (0)
# define lock_set_class(l, n, k, s, i)          do { } while (0)
# define lock_set_subclass(l, s, i)             do { } while (0)
# define lockdep_init()                         do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)        do { } while (0)

# define lockdep_set_novalidate_class(lock)     do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should rather #ifdef the call site itself.
 */

# define lockdep_reset()                do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)    do { } while (0)
# define lockdep_sys_exit()                     do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

# define lockdep_depth(tsk)     (0)

# define lockdep_is_held_type(l, r)             (1)

# define lockdep_assert_held(l)                 do { (void)(l); } while (0)
# define lockdep_assert_held_write(l)           do { (void)(l); } while (0)
# define lockdep_assert_held_read(l)            do { (void)(l); } while (0)
# define lockdep_assert_held_once(l)            do { (void)(l); } while (0)
# define lockdep_assert_none_held_once()        do { } while (0)

# define lockdep_recursing(tsk)                 (0)

# define NIL_COOKIE (struct pin_cookie){ }

# define lockdep_pin_lock(l)                    ({ struct pin_cookie cookie = { }; cookie; })
# define lockdep_repin_lock(l, c)               do { (void)(l); (void)(c); } while (0)
# define lockdep_unpin_lock(l, c)               do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

enum xhlock_context_t {
        XHLOCK_HARD,
        XHLOCK_SOFT,
        XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }

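/*
 * Usage sketch: statically set up a pseudo-lock map for annotating a
 * non-lock construct (the names here are illustrative):
 *
 *	static struct lock_class_key foo_key;
 *	static struct lockdep_map foo_map =
 *		STATIC_LOCKDEP_MAP_INIT("foo", &foo_key);
 */
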
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)                 \
({                                                              \
        int ____err = 0;                                        \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                ____err = lock(_lock);                          \
        }                                                       \
        if (!____err)                                           \
                lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
        ____err;                                                \
})
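
/*
 * Usage sketch (the helper names are illustrative): a blocking-lock
 * implementation tries its fast path first and only records contention
 * when it has to take the slow path:
 *
 *	LOCK_CONTENDED(lock, my_trylock, my_lock_slowpath);
 *
 * Under CONFIG_LOCK_STAT this expands to the try/contended/slowpath/
 * acquired sequence above; otherwise it is simply my_lock_slowpath(lock).
 */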

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING                    1
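
/*
 * Example: taking a parent's lock and then one child's lock of the same
 * lock class is the classic single-depth case (see
 * Documentation/locking/lockdep-design.rst):
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */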

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)                      lock_release(l, i)

#define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)                                 \
do {                                                                    \
        if (read_lock_is_recursive())                                   \
                lock_acquire_shared_recursive(l, s, t, NULL, i);        \
        else                                                            \
                lock_acquire_shared(l, s, t, NULL, i);                  \
} while (0)

#define rwlock_release(l, i)                    lock_release(l, i)

#define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)                  lock_release(l, i)

#define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)                     lock_release(l, i)

#define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)                     lock_release(l, i)

#define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)                     lock_release(l, _THIS_IP_)
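
/*
 * Example: an acquire/release pair lets the validator see a dependency
 * on a non-lock construct even though no real lock is taken - the
 * pattern used by, for example, the workqueue flush code ('foo_map' is
 * illustrative):
 *
 *	lock_map_acquire(&foo_map);
 *	lock_map_release(&foo_map);
 */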

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_nested(lock, subclass)                              \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,         \
                     _THIS_IP_);                                        \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
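
/*
 * Example: a function that only takes a lock on its slow path can still
 * report the would-be dependency on every call (a sketch; 'foo' is
 * hypothetical):
 *
 *	static void foo_reserve(struct foo *foo)
 *	{
 *		might_lock(&foo->lock);
 *		...
 *	}
 */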

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled       (debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()                                   \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()                                  \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()                                         \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()                             \
do {                                                                    \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() != 0              ||              \
                      !this_cpu_read(hardirqs_enabled)));               \
} while (0)

#define lockdep_assert_preemption_disabled()                            \
do {                                                                    \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() == 0              &&              \
                      this_cpu_read(hardirqs_enabled)));                \
} while (0)
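
/*
 * Example: code manipulating per-CPU state can assert its context
 * requirement instead of silently depending on it (a sketch;
 * 'foo_counter' is illustrative):
 *
 *	lockdep_assert_preemption_disabled();
 *	__this_cpu_inc(foo_counter);
 */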

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {                       \
                WARN_ONCE(debug_locks && !current->lockdep_recursion && \
                          lockdep_hardirq_context() &&                  \
                          !(current->hardirq_threaded || current->irq_config),  \
                          "Not in threaded context on PREEMPT_RT as expected\n");       \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */