/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
14 #include <linux/module.h>
15 #include <linux/proc_fs.h>
16 #include <linux/seq_file.h>
17 #include <linux/kallsyms.h>
18 #include <linux/debug_locks.h>
19 #include <linux/vmalloc.h>
20 #include <linux/sort.h>
21 #include <asm/uaccess.h>
22 #include <asm/div64.h>
24 #include "lockdep_internals.h"
26 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
28 return seq_list_next(v, &all_lock_classes, pos);
31 static void *l_start(struct seq_file *m, loff_t *pos)
33 return seq_list_start_head(&all_lock_classes, *pos);
36 static void l_stop(struct seq_file *m, void *v)
40 static void print_name(struct seq_file *m, struct lock_class *class)
43 const char *name = class->name;
46 name = __get_key_name(class->key, str);
47 seq_printf(m, "%s", name);
49 seq_printf(m, "%s", name);
50 if (class->name_version > 1)
51 seq_printf(m, "#%d", class->name_version);
53 seq_printf(m, "/%d", class->subclass);
57 static int l_show(struct seq_file *m, void *v)
59 struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
60 struct lock_list *entry;
61 char usage[LOCK_USAGE_CHARS];
63 if (v == &all_lock_classes) {
64 seq_printf(m, "all lock classes:\n");
68 seq_printf(m, "%p", class->key);
69 #ifdef CONFIG_DEBUG_LOCKDEP
70 seq_printf(m, " OPS:%8ld", class->ops);
72 #ifdef CONFIG_PROVE_LOCKING
73 seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
74 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
77 get_usage_chars(class, usage);
78 seq_printf(m, " %s", usage);
84 list_for_each_entry(entry, &class->locks_after, entry) {
85 if (entry->distance == 1) {
86 seq_printf(m, " -> [%p] ", entry->class->key);
87 print_name(m, entry->class);
96 static const struct seq_operations lockdep_ops = {
103 static int lockdep_open(struct inode *inode, struct file *file)
105 return seq_open(file, &lockdep_ops);
108 static const struct file_operations proc_lockdep_operations = {
109 .open = lockdep_open,
112 .release = seq_release,
115 #ifdef CONFIG_PROVE_LOCKING
116 static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
118 struct lock_chain *chain;
122 if (v == SEQ_START_TOKEN)
127 if (*pos - 1 < nr_lock_chains)
128 chain = lock_chains + (*pos - 1);
136 static void *lc_start(struct seq_file *m, loff_t *pos)
139 return SEQ_START_TOKEN;
141 if (*pos - 1 < nr_lock_chains)
142 return lock_chains + (*pos - 1);
147 static void lc_stop(struct seq_file *m, void *v)
151 static int lc_show(struct seq_file *m, void *v)
153 struct lock_chain *chain = v;
154 struct lock_class *class;
157 if (v == SEQ_START_TOKEN) {
158 seq_printf(m, "all lock chains:\n");
162 seq_printf(m, "irq_context: %d\n", chain->irq_context);
164 for (i = 0; i < chain->depth; i++) {
165 class = lock_chain_get_class(chain, i);
169 seq_printf(m, "[%p] ", class->key);
170 print_name(m, class);
178 static const struct seq_operations lockdep_chains_ops = {
185 static int lockdep_chains_open(struct inode *inode, struct file *file)
187 int res = seq_open(file, &lockdep_chains_ops);
189 struct seq_file *m = file->private_data;
192 m->private = lock_chains;
199 static const struct file_operations proc_lockdep_chains_operations = {
200 .open = lockdep_chains_open,
203 .release = seq_release,
205 #endif /* CONFIG_PROVE_LOCKING */
/*
 * Dump the CONFIG_DEBUG_LOCKDEP event counters (chain-cache hit rate,
 * graph-walk statistics, hard/softirq state-change events) into the
 * /proc/lockdep_stats output.  Compiles to a no-op otherwise.
 */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
		     hi2 = debug_atomic_read(&hardirqs_off_events),
		     hr1 = debug_atomic_read(&redundant_hardirqs_on),
		     hr2 = debug_atomic_read(&redundant_hardirqs_off),
		     si1 = debug_atomic_read(&softirqs_on_events),
		     si2 = debug_atomic_read(&softirqs_off_events),
		     sr1 = debug_atomic_read(&redundant_softirqs_on),
		     sr2 = debug_atomic_read(&redundant_softirqs_off);

	seq_printf(m, " chain lookup misses:           %11u\n",
		debug_atomic_read(&chain_lookup_misses));
	seq_printf(m, " chain lookup hits:             %11u\n",
		debug_atomic_read(&chain_lookup_hits));
	seq_printf(m, " cyclic checks:                 %11u\n",
		debug_atomic_read(&nr_cyclic_checks));
	seq_printf(m, " cyclic-check recursions:       %11u\n",
		debug_atomic_read(&nr_cyclic_check_recursions));
	seq_printf(m, " find-mask forwards checks:     %11u\n",
		debug_atomic_read(&nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask forwards recursions: %11u\n",
		debug_atomic_read(&nr_find_usage_forwards_recursions));
	seq_printf(m, " find-mask backwards checks:    %11u\n",
		debug_atomic_read(&nr_find_usage_backwards_checks));
	seq_printf(m, " find-mask backwards recursions:%11u\n",
		debug_atomic_read(&nr_find_usage_backwards_recursions));

	seq_printf(m, " hardirq on events:             %11u\n", hi1);
	seq_printf(m, " hardirq off events:            %11u\n", hi2);
	seq_printf(m, " redundant hardirq ons:         %11u\n", hr1);
	seq_printf(m, " redundant hardirq offs:        %11u\n", hr2);
	seq_printf(m, " softirq on events:             %11u\n", si1);
	seq_printf(m, " softirq off events:            %11u\n", si2);
	seq_printf(m, " redundant softirq ons:         %11u\n", sr1);
	seq_printf(m, " redundant softirq offs:        %11u\n", sr2);
#endif
}
247 static int lockdep_stats_show(struct seq_file *m, void *v)
249 struct lock_class *class;
250 unsigned long nr_unused = 0, nr_uncategorized = 0,
251 nr_irq_safe = 0, nr_irq_unsafe = 0,
252 nr_softirq_safe = 0, nr_softirq_unsafe = 0,
253 nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
254 nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
255 nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
256 nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
257 sum_forward_deps = 0, factor = 0;
259 list_for_each_entry(class, &all_lock_classes, lock_entry) {
261 if (class->usage_mask == 0)
263 if (class->usage_mask == LOCKF_USED)
265 if (class->usage_mask & LOCKF_USED_IN_IRQ)
267 if (class->usage_mask & LOCKF_ENABLED_IRQ)
269 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
271 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
273 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
275 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
277 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
279 if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
280 nr_irq_read_unsafe++;
281 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
282 nr_softirq_read_safe++;
283 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
284 nr_softirq_read_unsafe++;
285 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
286 nr_hardirq_read_safe++;
287 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
288 nr_hardirq_read_unsafe++;
290 #ifdef CONFIG_PROVE_LOCKING
291 sum_forward_deps += lockdep_count_forward_deps(class);
294 #ifdef CONFIG_DEBUG_LOCKDEP
295 DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
297 seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
298 nr_lock_classes, MAX_LOCKDEP_KEYS);
299 seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
300 nr_list_entries, MAX_LOCKDEP_ENTRIES);
301 seq_printf(m, " indirect dependencies: %11lu\n",
305 * Total number of dependencies:
307 * All irq-safe locks may nest inside irq-unsafe locks,
308 * plus all the other known dependencies:
310 seq_printf(m, " all direct dependencies: %11lu\n",
311 nr_irq_unsafe * nr_irq_safe +
312 nr_hardirq_unsafe * nr_hardirq_safe +
316 * Estimated factor between direct and indirect
320 factor = sum_forward_deps / nr_list_entries;
322 #ifdef CONFIG_PROVE_LOCKING
323 seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
324 nr_lock_chains, MAX_LOCKDEP_CHAINS);
325 seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
326 nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS);
329 #ifdef CONFIG_TRACE_IRQFLAGS
330 seq_printf(m, " in-hardirq chains: %11u\n",
332 seq_printf(m, " in-softirq chains: %11u\n",
335 seq_printf(m, " in-process chains: %11u\n",
337 seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
338 nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
339 seq_printf(m, " combined max dependencies: %11u\n",
340 (nr_hardirq_chains + 1) *
341 (nr_softirq_chains + 1) *
342 (nr_process_chains + 1)
344 seq_printf(m, " hardirq-safe locks: %11lu\n",
346 seq_printf(m, " hardirq-unsafe locks: %11lu\n",
348 seq_printf(m, " softirq-safe locks: %11lu\n",
350 seq_printf(m, " softirq-unsafe locks: %11lu\n",
352 seq_printf(m, " irq-safe locks: %11lu\n",
354 seq_printf(m, " irq-unsafe locks: %11lu\n",
357 seq_printf(m, " hardirq-read-safe locks: %11lu\n",
358 nr_hardirq_read_safe);
359 seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
360 nr_hardirq_read_unsafe);
361 seq_printf(m, " softirq-read-safe locks: %11lu\n",
362 nr_softirq_read_safe);
363 seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
364 nr_softirq_read_unsafe);
365 seq_printf(m, " irq-read-safe locks: %11lu\n",
367 seq_printf(m, " irq-read-unsafe locks: %11lu\n",
370 seq_printf(m, " uncategorized locks: %11lu\n",
372 seq_printf(m, " unused locks: %11lu\n",
374 seq_printf(m, " max locking depth: %11u\n",
376 seq_printf(m, " max recursion depth: %11u\n",
377 max_recursion_depth);
378 #ifdef CONFIG_PROVE_LOCKING
379 seq_printf(m, " max bfs queue depth: %11u\n",
380 max_bfs_queue_depth);
382 lockdep_stats_debug_show(m);
383 seq_printf(m, " debug_locks: %11u\n",
389 static int lockdep_stats_open(struct inode *inode, struct file *file)
391 return single_open(file, lockdep_stats_show, NULL);
394 static const struct file_operations proc_lockdep_stats_operations = {
395 .open = lockdep_stats_open,
398 .release = single_release,
401 #ifdef CONFIG_LOCK_STAT
403 struct lock_stat_data {
404 struct lock_class *class;
405 struct lock_class_stats stats;
408 struct lock_stat_seq {
409 struct lock_stat_data *iter;
410 struct lock_stat_data *iter_end;
411 struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
415 * sort on absolute number of contentions
417 static int lock_stat_cmp(const void *l, const void *r)
419 const struct lock_stat_data *dl = l, *dr = r;
420 unsigned long nl, nr;
422 nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
423 nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
/* Emit 'offset' spaces, then 'length' copies of 'c', then a newline. */
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
	int i;

	for (i = 0; i < offset; i++)
		seq_puts(m, " ");
	for (i = 0; i < length; i++)
		seq_printf(m, "%c", c);
	seq_puts(m, "\n");
}
439 static void snprint_time(char *buf, size_t bufsiz, s64 nr)
444 nr += 5; /* for display rounding */
445 div = div_s64_rem(nr, 1000, &rem);
446 snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
449 static void seq_time(struct seq_file *m, s64 time)
453 snprint_time(num, sizeof(num), time);
454 seq_printf(m, " %14s", num);
457 static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
459 seq_printf(m, "%14lu", lt->nr);
460 seq_time(m, lt->min);
461 seq_time(m, lt->max);
462 seq_time(m, lt->total);
465 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
468 struct lock_class *class;
469 struct lock_class_stats *stats;
473 stats = &data->stats;
476 if (class->name_version > 1)
477 namelen -= 2; /* XXX truncates versions > 9 */
482 char str[KSYM_NAME_LEN];
483 const char *key_name;
485 key_name = __get_key_name(class->key, str);
486 snprintf(name, namelen, "%s", key_name);
488 snprintf(name, namelen, "%s", class->name);
490 namelen = strlen(name);
491 if (class->name_version > 1) {
492 snprintf(name+namelen, 3, "#%d", class->name_version);
495 if (class->subclass) {
496 snprintf(name+namelen, 3, "/%d", class->subclass);
500 if (stats->write_holdtime.nr) {
501 if (stats->read_holdtime.nr)
502 seq_printf(m, "%38s-W:", name);
504 seq_printf(m, "%40s:", name);
506 seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
507 seq_lock_time(m, &stats->write_waittime);
508 seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
509 seq_lock_time(m, &stats->write_holdtime);
513 if (stats->read_holdtime.nr) {
514 seq_printf(m, "%38s-R:", name);
515 seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
516 seq_lock_time(m, &stats->read_waittime);
517 seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
518 seq_lock_time(m, &stats->read_holdtime);
522 if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
525 if (stats->read_holdtime.nr)
528 for (i = 0; i < LOCKSTAT_POINTS; i++) {
529 char sym[KSYM_SYMBOL_LEN];
532 if (class->contention_point[i] == 0)
536 seq_line(m, '-', 40-namelen, namelen);
538 sprint_symbol(sym, class->contention_point[i]);
539 snprintf(ip, sizeof(ip), "[<%p>]",
540 (void *)class->contention_point[i]);
541 seq_printf(m, "%40s %14lu %29s %s\n", name,
542 stats->contention_point[i],
545 for (i = 0; i < LOCKSTAT_POINTS; i++) {
546 char sym[KSYM_SYMBOL_LEN];
549 if (class->contending_point[i] == 0)
553 seq_line(m, '-', 40-namelen, namelen);
555 sprint_symbol(sym, class->contending_point[i]);
556 snprintf(ip, sizeof(ip), "[<%p>]",
557 (void *)class->contending_point[i]);
558 seq_printf(m, "%40s %14lu %29s %s\n", name,
559 stats->contending_point[i],
564 seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
569 static void seq_header(struct seq_file *m)
571 seq_printf(m, "lock_stat version 0.3\n");
573 if (unlikely(!debug_locks))
574 seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
576 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
577 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
590 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
594 static void *ls_start(struct seq_file *m, loff_t *pos)
596 struct lock_stat_seq *data = m->private;
599 return SEQ_START_TOKEN;
601 data->iter = data->stats + (*pos - 1);
602 if (data->iter >= data->iter_end)
608 static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
610 struct lock_stat_seq *data = m->private;
614 if (v == SEQ_START_TOKEN)
615 data->iter = data->stats;
621 if (data->iter == data->iter_end)
627 static void ls_stop(struct seq_file *m, void *v)
631 static int ls_show(struct seq_file *m, void *v)
633 if (v == SEQ_START_TOKEN)
641 static struct seq_operations lockstat_ops = {
648 static int lock_stat_open(struct inode *inode, struct file *file)
651 struct lock_class *class;
652 struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
657 res = seq_open(file, &lockstat_ops);
659 struct lock_stat_data *iter = data->stats;
660 struct seq_file *m = file->private_data;
663 list_for_each_entry(class, &all_lock_classes, lock_entry) {
665 iter->stats = lock_stats(class);
668 data->iter_end = iter;
670 sort(data->stats, data->iter_end - data->iter,
671 sizeof(struct lock_stat_data),
672 lock_stat_cmp, NULL);
681 static ssize_t lock_stat_write(struct file *file, const char __user *buf,
682 size_t count, loff_t *ppos)
684 struct lock_class *class;
688 if (get_user(c, buf))
694 list_for_each_entry(class, &all_lock_classes, lock_entry)
695 clear_lock_stats(class);
700 static int lock_stat_release(struct inode *inode, struct file *file)
702 struct seq_file *seq = file->private_data;
706 return seq_release(inode, file);
709 static const struct file_operations proc_lock_stat_operations = {
710 .open = lock_stat_open,
711 .write = lock_stat_write,
714 .release = lock_stat_release,
716 #endif /* CONFIG_LOCK_STAT */
718 static int __init lockdep_proc_init(void)
720 proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
721 #ifdef CONFIG_PROVE_LOCKING
722 proc_create("lockdep_chains", S_IRUSR, NULL,
723 &proc_lockdep_chains_operations);
725 proc_create("lockdep_stats", S_IRUSR, NULL,
726 &proc_lockdep_stats_operations);
728 #ifdef CONFIG_LOCK_STAT
729 proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
735 __initcall(lockdep_proc_init);