// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
15 #include <linux/export.h>
16 #include <linux/proc_fs.h>
17 #include <linux/seq_file.h>
18 #include <linux/kallsyms.h>
19 #include <linux/debug_locks.h>
20 #include <linux/vmalloc.h>
21 #include <linux/sort.h>
22 #include <linux/uaccess.h>
23 #include <asm/div64.h>
25 #include "lockdep_internals.h"
27 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
29 return seq_list_next(v, &all_lock_classes, pos);
32 static void *l_start(struct seq_file *m, loff_t *pos)
34 return seq_list_start_head(&all_lock_classes, *pos);
37 static void l_stop(struct seq_file *m, void *v)
41 static void print_name(struct seq_file *m, struct lock_class *class)
43 char str[KSYM_NAME_LEN];
44 const char *name = class->name;
47 name = __get_key_name(class->key, str);
48 seq_printf(m, "%s", name);
50 seq_printf(m, "%s", name);
51 if (class->name_version > 1)
52 seq_printf(m, "#%d", class->name_version);
54 seq_printf(m, "/%d", class->subclass);
58 static int l_show(struct seq_file *m, void *v)
60 struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
61 struct lock_list *entry;
62 char usage[LOCK_USAGE_CHARS];
64 if (v == &all_lock_classes) {
65 seq_printf(m, "all lock classes:\n");
69 seq_printf(m, "%p", class->key);
70 #ifdef CONFIG_DEBUG_LOCKDEP
71 seq_printf(m, " OPS:%8ld", class->ops);
73 #ifdef CONFIG_PROVE_LOCKING
74 seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
75 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
78 get_usage_chars(class, usage);
79 seq_printf(m, " %s", usage);
85 list_for_each_entry(entry, &class->locks_after, entry) {
86 if (entry->distance == 1) {
87 seq_printf(m, " -> [%p] ", entry->class->key);
88 print_name(m, entry->class);
97 static const struct seq_operations lockdep_ops = {
104 static int lockdep_open(struct inode *inode, struct file *file)
106 return seq_open(file, &lockdep_ops);
109 static const struct file_operations proc_lockdep_operations = {
110 .open = lockdep_open,
113 .release = seq_release,
116 #ifdef CONFIG_PROVE_LOCKING
117 static void *lc_start(struct seq_file *m, loff_t *pos)
120 return SEQ_START_TOKEN;
122 if (*pos - 1 < nr_lock_chains)
123 return lock_chains + (*pos - 1);
128 static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
131 return lc_start(m, pos);
134 static void lc_stop(struct seq_file *m, void *v)
138 static int lc_show(struct seq_file *m, void *v)
140 struct lock_chain *chain = v;
141 struct lock_class *class;
144 if (v == SEQ_START_TOKEN) {
145 if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
146 seq_printf(m, "(buggered) ");
147 seq_printf(m, "all lock chains:\n");
151 seq_printf(m, "irq_context: %d\n", chain->irq_context);
153 for (i = 0; i < chain->depth; i++) {
154 class = lock_chain_get_class(chain, i);
158 seq_printf(m, "[%p] ", class->key);
159 print_name(m, class);
167 static const struct seq_operations lockdep_chains_ops = {
174 static int lockdep_chains_open(struct inode *inode, struct file *file)
176 return seq_open(file, &lockdep_chains_ops);
179 static const struct file_operations proc_lockdep_chains_operations = {
180 .open = lockdep_chains_open,
183 .release = seq_release,
185 #endif /* CONFIG_PROVE_LOCKING */
187 static void lockdep_stats_debug_show(struct seq_file *m)
189 #ifdef CONFIG_DEBUG_LOCKDEP
190 unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
191 hi2 = debug_atomic_read(hardirqs_off_events),
192 hr1 = debug_atomic_read(redundant_hardirqs_on),
193 hr2 = debug_atomic_read(redundant_hardirqs_off),
194 si1 = debug_atomic_read(softirqs_on_events),
195 si2 = debug_atomic_read(softirqs_off_events),
196 sr1 = debug_atomic_read(redundant_softirqs_on),
197 sr2 = debug_atomic_read(redundant_softirqs_off);
199 seq_printf(m, " chain lookup misses: %11llu\n",
200 debug_atomic_read(chain_lookup_misses));
201 seq_printf(m, " chain lookup hits: %11llu\n",
202 debug_atomic_read(chain_lookup_hits));
203 seq_printf(m, " cyclic checks: %11llu\n",
204 debug_atomic_read(nr_cyclic_checks));
205 seq_printf(m, " redundant checks: %11llu\n",
206 debug_atomic_read(nr_redundant_checks));
207 seq_printf(m, " redundant links: %11llu\n",
208 debug_atomic_read(nr_redundant));
209 seq_printf(m, " find-mask forwards checks: %11llu\n",
210 debug_atomic_read(nr_find_usage_forwards_checks));
211 seq_printf(m, " find-mask backwards checks: %11llu\n",
212 debug_atomic_read(nr_find_usage_backwards_checks));
214 seq_printf(m, " hardirq on events: %11llu\n", hi1);
215 seq_printf(m, " hardirq off events: %11llu\n", hi2);
216 seq_printf(m, " redundant hardirq ons: %11llu\n", hr1);
217 seq_printf(m, " redundant hardirq offs: %11llu\n", hr2);
218 seq_printf(m, " softirq on events: %11llu\n", si1);
219 seq_printf(m, " softirq off events: %11llu\n", si2);
220 seq_printf(m, " redundant softirq ons: %11llu\n", sr1);
221 seq_printf(m, " redundant softirq offs: %11llu\n", sr2);
225 static int lockdep_stats_show(struct seq_file *m, void *v)
227 unsigned long nr_unused = 0, nr_uncategorized = 0,
228 nr_irq_safe = 0, nr_irq_unsafe = 0,
229 nr_softirq_safe = 0, nr_softirq_unsafe = 0,
230 nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
231 nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
232 nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
233 nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
234 sum_forward_deps = 0;
236 #ifdef CONFIG_PROVE_LOCKING
237 struct lock_class *class;
239 list_for_each_entry(class, &all_lock_classes, lock_entry) {
241 if (class->usage_mask == 0)
243 if (class->usage_mask == LOCKF_USED)
245 if (class->usage_mask & LOCKF_USED_IN_IRQ)
247 if (class->usage_mask & LOCKF_ENABLED_IRQ)
249 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
251 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
253 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
255 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
257 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
259 if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
260 nr_irq_read_unsafe++;
261 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
262 nr_softirq_read_safe++;
263 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
264 nr_softirq_read_unsafe++;
265 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
266 nr_hardirq_read_safe++;
267 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
268 nr_hardirq_read_unsafe++;
270 sum_forward_deps += lockdep_count_forward_deps(class);
272 #ifdef CONFIG_DEBUG_LOCKDEP
273 DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
277 seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
278 nr_lock_classes, MAX_LOCKDEP_KEYS);
279 seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
280 nr_list_entries, MAX_LOCKDEP_ENTRIES);
281 seq_printf(m, " indirect dependencies: %11lu\n",
285 * Total number of dependencies:
287 * All irq-safe locks may nest inside irq-unsafe locks,
288 * plus all the other known dependencies:
290 seq_printf(m, " all direct dependencies: %11lu\n",
291 nr_irq_unsafe * nr_irq_safe +
292 nr_hardirq_unsafe * nr_hardirq_safe +
295 #ifdef CONFIG_PROVE_LOCKING
296 seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
297 nr_lock_chains, MAX_LOCKDEP_CHAINS);
298 seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
299 nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS);
302 #ifdef CONFIG_TRACE_IRQFLAGS
303 seq_printf(m, " in-hardirq chains: %11u\n",
305 seq_printf(m, " in-softirq chains: %11u\n",
308 seq_printf(m, " in-process chains: %11u\n",
310 seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
311 nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
312 seq_printf(m, " combined max dependencies: %11u\n",
313 (nr_hardirq_chains + 1) *
314 (nr_softirq_chains + 1) *
315 (nr_process_chains + 1)
317 seq_printf(m, " hardirq-safe locks: %11lu\n",
319 seq_printf(m, " hardirq-unsafe locks: %11lu\n",
321 seq_printf(m, " softirq-safe locks: %11lu\n",
323 seq_printf(m, " softirq-unsafe locks: %11lu\n",
325 seq_printf(m, " irq-safe locks: %11lu\n",
327 seq_printf(m, " irq-unsafe locks: %11lu\n",
330 seq_printf(m, " hardirq-read-safe locks: %11lu\n",
331 nr_hardirq_read_safe);
332 seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
333 nr_hardirq_read_unsafe);
334 seq_printf(m, " softirq-read-safe locks: %11lu\n",
335 nr_softirq_read_safe);
336 seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
337 nr_softirq_read_unsafe);
338 seq_printf(m, " irq-read-safe locks: %11lu\n",
340 seq_printf(m, " irq-read-unsafe locks: %11lu\n",
343 seq_printf(m, " uncategorized locks: %11lu\n",
345 seq_printf(m, " unused locks: %11lu\n",
347 seq_printf(m, " max locking depth: %11u\n",
349 #ifdef CONFIG_PROVE_LOCKING
350 seq_printf(m, " max bfs queue depth: %11u\n",
351 max_bfs_queue_depth);
353 lockdep_stats_debug_show(m);
354 seq_printf(m, " debug_locks: %11u\n",
360 static int lockdep_stats_open(struct inode *inode, struct file *file)
362 return single_open(file, lockdep_stats_show, NULL);
365 static const struct file_operations proc_lockdep_stats_operations = {
366 .open = lockdep_stats_open,
369 .release = single_release,
372 #ifdef CONFIG_LOCK_STAT
374 struct lock_stat_data {
375 struct lock_class *class;
376 struct lock_class_stats stats;
379 struct lock_stat_seq {
380 struct lock_stat_data *iter_end;
381 struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
385 * sort on absolute number of contentions
387 static int lock_stat_cmp(const void *l, const void *r)
389 const struct lock_stat_data *dl = l, *dr = r;
390 unsigned long nl, nr;
392 nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
393 nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
398 static void seq_line(struct seq_file *m, char c, int offset, int length)
402 for (i = 0; i < offset; i++)
404 for (i = 0; i < length; i++)
405 seq_printf(m, "%c", c);
409 static void snprint_time(char *buf, size_t bufsiz, s64 nr)
414 nr += 5; /* for display rounding */
415 div = div_s64_rem(nr, 1000, &rem);
416 snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
419 static void seq_time(struct seq_file *m, s64 time)
423 snprint_time(num, sizeof(num), time);
424 seq_printf(m, " %14s", num);
427 static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
429 seq_printf(m, "%14lu", lt->nr);
430 seq_time(m, lt->min);
431 seq_time(m, lt->max);
432 seq_time(m, lt->total);
433 seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
436 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
438 struct lockdep_subclass_key *ckey;
439 struct lock_class_stats *stats;
440 struct lock_class *class;
446 stats = &data->stats;
449 if (class->name_version > 1)
450 namelen -= 2; /* XXX truncates versions > 9 */
454 rcu_read_lock_sched();
455 cname = rcu_dereference_sched(class->name);
456 ckey = rcu_dereference_sched(class->key);
458 if (!cname && !ckey) {
459 rcu_read_unlock_sched();
463 char str[KSYM_NAME_LEN];
464 const char *key_name;
466 key_name = __get_key_name(ckey, str);
467 snprintf(name, namelen, "%s", key_name);
469 snprintf(name, namelen, "%s", cname);
471 rcu_read_unlock_sched();
473 namelen = strlen(name);
474 if (class->name_version > 1) {
475 snprintf(name+namelen, 3, "#%d", class->name_version);
478 if (class->subclass) {
479 snprintf(name+namelen, 3, "/%d", class->subclass);
483 if (stats->write_holdtime.nr) {
484 if (stats->read_holdtime.nr)
485 seq_printf(m, "%38s-W:", name);
487 seq_printf(m, "%40s:", name);
489 seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
490 seq_lock_time(m, &stats->write_waittime);
491 seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
492 seq_lock_time(m, &stats->write_holdtime);
496 if (stats->read_holdtime.nr) {
497 seq_printf(m, "%38s-R:", name);
498 seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
499 seq_lock_time(m, &stats->read_waittime);
500 seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
501 seq_lock_time(m, &stats->read_holdtime);
505 if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
508 if (stats->read_holdtime.nr)
511 for (i = 0; i < LOCKSTAT_POINTS; i++) {
514 if (class->contention_point[i] == 0)
518 seq_line(m, '-', 40-namelen, namelen);
520 snprintf(ip, sizeof(ip), "[<%p>]",
521 (void *)class->contention_point[i]);
522 seq_printf(m, "%40s %14lu %29s %pS\n",
523 name, stats->contention_point[i],
524 ip, (void *)class->contention_point[i]);
526 for (i = 0; i < LOCKSTAT_POINTS; i++) {
529 if (class->contending_point[i] == 0)
533 seq_line(m, '-', 40-namelen, namelen);
535 snprintf(ip, sizeof(ip), "[<%p>]",
536 (void *)class->contending_point[i]);
537 seq_printf(m, "%40s %14lu %29s %pS\n",
538 name, stats->contending_point[i],
539 ip, (void *)class->contending_point[i]);
543 seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
548 static void seq_header(struct seq_file *m)
550 seq_puts(m, "lock_stat version 0.4\n");
552 if (unlikely(!debug_locks))
553 seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
555 seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
556 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
571 seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
575 static void *ls_start(struct seq_file *m, loff_t *pos)
577 struct lock_stat_seq *data = m->private;
578 struct lock_stat_data *iter;
581 return SEQ_START_TOKEN;
583 iter = data->stats + (*pos - 1);
584 if (iter >= data->iter_end)
590 static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
593 return ls_start(m, pos);
596 static void ls_stop(struct seq_file *m, void *v)
600 static int ls_show(struct seq_file *m, void *v)
602 if (v == SEQ_START_TOKEN)
610 static const struct seq_operations lockstat_ops = {
617 static int lock_stat_open(struct inode *inode, struct file *file)
620 struct lock_class *class;
621 struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
626 res = seq_open(file, &lockstat_ops);
628 struct lock_stat_data *iter = data->stats;
629 struct seq_file *m = file->private_data;
631 list_for_each_entry(class, &all_lock_classes, lock_entry) {
633 iter->stats = lock_stats(class);
636 data->iter_end = iter;
638 sort(data->stats, data->iter_end - data->stats,
639 sizeof(struct lock_stat_data),
640 lock_stat_cmp, NULL);
649 static ssize_t lock_stat_write(struct file *file, const char __user *buf,
650 size_t count, loff_t *ppos)
652 struct lock_class *class;
656 if (get_user(c, buf))
662 list_for_each_entry(class, &all_lock_classes, lock_entry)
663 clear_lock_stats(class);
668 static int lock_stat_release(struct inode *inode, struct file *file)
670 struct seq_file *seq = file->private_data;
673 return seq_release(inode, file);
676 static const struct file_operations proc_lock_stat_operations = {
677 .open = lock_stat_open,
678 .write = lock_stat_write,
681 .release = lock_stat_release,
683 #endif /* CONFIG_LOCK_STAT */
685 static int __init lockdep_proc_init(void)
687 proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
688 #ifdef CONFIG_PROVE_LOCKING
689 proc_create("lockdep_chains", S_IRUSR, NULL,
690 &proc_lockdep_chains_operations);
692 proc_create("lockdep_stats", S_IRUSR, NULL,
693 &proc_lockdep_stats_operations);
695 #ifdef CONFIG_LOCK_STAT
696 proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
697 &proc_lock_stat_operations);
703 __initcall(lockdep_proc_init);