// SPDX-License-Identifier: GPL-2.0
/*
 * Assorted bcachefs debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "super.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *bch_debug;
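
/*
 * Btree node verification: read the node back from each replica and compare
 * it against the sorted in-memory copy, dumping both bsets to the console
 * when they differ.
 */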
static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
                                      struct extent_ptr_decoded pick)
{
        struct btree *v = c->verify_data;
        struct btree_node *n_ondisk = c->verify_ondisk;
        struct btree_node *n_sorted = c->verify_data->data;
        struct bset *sorted, *inmemory = &b->data->keys;
        struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
        struct bio *bio;
        bool failed = false, saw_error = false;

        if (!bch2_dev_get_ioref(ca, READ))
                return false;

        bio = bio_alloc_bioset(ca->disk_sb.bdev,
                               buf_pages(n_sorted, btree_buf_bytes(b)),
                               REQ_OP_READ|REQ_META,
                               GFP_NOFS,
                               &c->btree_bio);
        bio->bi_iter.bi_sector = pick.ptr.offset;
        bch2_bio_map(bio, n_sorted, btree_buf_bytes(b));

        submit_bio_wait(bio);

        bio_put(bio);
        percpu_ref_put(&ca->io_ref);

        memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));

        v->written = 0;
        if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error)
                return false;

        n_sorted = c->verify_data->data;
        sorted = &n_sorted->keys;

        if (inmemory->u64s != sorted->u64s ||
            memcmp(inmemory->start,
                   sorted->start,
                   vstruct_end(inmemory) - (void *) inmemory->start)) {
                unsigned offset = 0, sectors;
                struct bset *i;
                unsigned j;

                console_lock();

                printk(KERN_ERR "*** in memory:\n");
                bch2_dump_bset(c, b, inmemory, 0);

                printk(KERN_ERR "*** read back in:\n");
                bch2_dump_bset(c, v, sorted, 0);

                while (offset < v->written) {
                        if (!offset) {
                                i = &n_ondisk->keys;
                                sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
                                        c->block_bits;
                        } else {
                                struct btree_node_entry *bne =
                                        (void *) n_ondisk + (offset << 9);
                                i = &bne->keys;
                                sectors = vstruct_blocks(bne, c->block_bits) <<
                                        c->block_bits;
                        }

                        printk(KERN_ERR "*** on disk block %u:\n", offset);
                        bch2_dump_bset(c, b, i, offset);

                        offset += sectors;
                }

                for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
                        if (inmemory->_data[j] != sorted->_data[j])
                                break;

                console_unlock();
                bch_err(c, "verify failed at key %u", j);

                failed = true;
        }

        if (v->written != b->written) {
                bch_err(c, "written wrong: expected %u, got %u",
                        b->written, v->written);
                failed = true;
        }

        return failed;
}
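
/*
 * Entry point for node verification: allocates the shared verify_ondisk and
 * verify_data scratch buffers on first use (under verify_lock), then checks
 * every replica the node's key points at.
 */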
void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
        struct bkey_ptrs_c ptrs;
        struct extent_ptr_decoded p;
        const union bch_extent_entry *entry;
        struct btree *v;
        struct bset *inmemory = &b->data->keys;
        struct bkey_packed *k;
        bool failed = false;

        if (c->opts.nochanges)
                return;

        bch2_btree_node_io_lock(b);
        mutex_lock(&c->verify_lock);

        if (!c->verify_ondisk) {
                c->verify_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
                if (!c->verify_ondisk)
                        goto out;
        }

        if (!c->verify_data) {
                c->verify_data = __bch2_btree_node_mem_alloc(c);
                if (!c->verify_data)
                        goto out;

                list_del_init(&c->verify_data->list);
        }

        BUG_ON(b->nsets != 1);

        for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
                if (k->type == KEY_TYPE_btree_ptr_v2)
                        ((struct bch_btree_ptr_v2 *) bkeyp_val(&b->format, k))->mem_ptr = 0;

        v = c->verify_data;
        bkey_copy(&v->key, &b->key);
        v->c.level = b->c.level;
        v->c.btree_id = b->c.btree_id;
        bch2_btree_keys_init(v);

        ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
        bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry)
                failed |= bch2_btree_verify_replica(c, b, p);

        if (failed) {
                struct printbuf buf = PRINTBUF;

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
                bch2_fs_fatal_error(c, "btree node verify failed for : %s\n", buf.buf);
                printbuf_exit(&buf);
        }
out:
        mutex_unlock(&c->verify_lock);
        bch2_btree_node_io_unlock(b);
}
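
/*
 * Print the on-disk contents of a btree node: read it from one replica,
 * checksum and decrypt each bset, then emit every key to @out.
 */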
void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
                                    const struct btree *b)
{
        struct btree_node *n_ondisk = NULL;
        struct extent_ptr_decoded pick;
        struct bch_dev *ca;
        struct bio *bio = NULL;
        unsigned offset = 0;
        int ret;

        if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick) <= 0) {
                prt_printf(out, "error getting device to read from: invalid device\n");
                return;
        }

        ca = bch_dev_bkey_exists(c, pick.ptr.dev);
        if (!bch2_dev_get_ioref(ca, READ)) {
                prt_printf(out, "error getting device to read from: not online\n");
                return;
        }

        n_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
        if (!n_ondisk) {
                prt_printf(out, "memory allocation failure\n");
                goto out;
        }

        bio = bio_alloc_bioset(ca->disk_sb.bdev,
                               buf_pages(n_ondisk, btree_buf_bytes(b)),
                               REQ_OP_READ|REQ_META,
                               GFP_NOFS,
                               &c->btree_bio);
        bio->bi_iter.bi_sector = pick.ptr.offset;
        bch2_bio_map(bio, n_ondisk, btree_buf_bytes(b));

        ret = submit_bio_wait(bio);
        if (ret) {
                prt_printf(out, "IO error reading btree node: %s\n", bch2_err_str(ret));
                goto out;
        }

        while (offset < btree_sectors(c)) {
                struct bset *i;
                struct nonce nonce;
                struct bch_csum csum;
                struct bkey_packed *k;
                unsigned sectors;

                if (!offset) {
                        i = &n_ondisk->keys;

                        if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
                                prt_printf(out, "unknown checksum type at offset %u: %llu\n",
                                           offset, BSET_CSUM_TYPE(i));
                                goto out;
                        }

                        nonce = btree_nonce(i, offset << 9);
                        csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, n_ondisk);

                        if (bch2_crc_cmp(csum, n_ondisk->csum)) {
                                prt_printf(out, "invalid checksum\n");
                                goto out;
                        }

                        bset_encrypt(c, i, offset << 9);

                        sectors = vstruct_sectors(n_ondisk, c->block_bits);
                } else {
                        struct btree_node_entry *bne = (void *) n_ondisk + (offset << 9);

                        i = &bne->keys;

                        if (i->seq != n_ondisk->keys.seq)
                                break;

                        if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
                                prt_printf(out, "unknown checksum type at offset %u: %llu\n",
                                           offset, BSET_CSUM_TYPE(i));
                                goto out;
                        }

                        nonce = btree_nonce(i, offset << 9);
                        csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

                        if (bch2_crc_cmp(csum, bne->csum)) {
                                prt_printf(out, "invalid checksum");
                                goto out;
                        }

                        bset_encrypt(c, i, offset << 9);

                        sectors = vstruct_sectors(bne, c->block_bits);
                }

                prt_printf(out, " offset %u version %u, journal seq %llu\n",
                           offset,
                           le16_to_cpu(i->version),
                           le64_to_cpu(i->journal_seq));
                offset += sectors;

                printbuf_indent_add(out, 4);

                for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) {
                        struct bkey u;

                        bch2_bkey_val_to_text(out, c, bkey_disassemble(b, k, &u));
                        prt_newline(out);
                }

                printbuf_indent_sub(out, 4);
        }
out:
        if (bio)
                bio_put(bio);
        kvpfree(n_ondisk, btree_buf_bytes(b));
        percpu_ref_put(&ca->io_ref);
}
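
/*
 * Everything below implements the debugfs interface; it is only built when
 * CONFIG_DEBUG_FS is enabled.
 */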
#ifdef CONFIG_DEBUG_FS

/* XXX: bch_fs refcounting */

struct dump_iter {
        struct bch_fs   *c;
        enum btree_id   id;
        struct bpos     from;
        struct bpos     prev_node;
        u64             iter;

        struct printbuf buf;

        char __user     *ubuf;  /* destination user buffer */
        size_t          size;   /* size of requested read */
        ssize_t         ret;    /* bytes read so far */
};

static ssize_t flush_buf(struct dump_iter *i)
{
        if (i->buf.pos) {
                size_t bytes = min_t(size_t, i->buf.pos, i->size);
                int copied = bytes - copy_to_user(i->ubuf, i->buf.buf, bytes);

                i->ret  += copied;
                i->ubuf += copied;
                i->size -= copied;
                i->buf.pos -= copied;
                memmove(i->buf.buf, i->buf.buf + copied, i->buf.pos);

                if (copied != bytes)
                        return -EFAULT;
        }

        return i->size ? 0 : i->ret;
}
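
/*
 * Common open/release for the debugfs files: a dump_iter remembers how far
 * the previous read() got so that sequential reads resume where they left
 * off.
 */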
static int bch2_dump_open(struct inode *inode, struct file *file)
{
        struct btree_debug *bd = inode->i_private;
        struct dump_iter *i;

        i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
        if (!i)
                return -ENOMEM;

        file->private_data = i;
        i->from = POS_MIN;
        i->iter = 0;
        i->c    = container_of(bd, struct bch_fs, btree_debug[bd->id]);
        i->id   = bd->id;
        i->buf  = PRINTBUF;

        return 0;
}

static int bch2_dump_release(struct inode *inode, struct file *file)
{
        struct dump_iter *i = file->private_data;

        printbuf_exit(&i->buf);
        kfree(i);
        return 0;
}
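
/* Dump every key in a btree, one bkey per line, resuming at i->from. */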
static ssize_t bch2_read_btree(struct file *file, char __user *buf,
                               size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        return flush_buf(i) ?:
                bch2_trans_run(i->c,
                        for_each_btree_key(trans, iter, i->id, i->from,
                                           BTREE_ITER_PREFETCH|
                                           BTREE_ITER_ALL_SNAPSHOTS, k, ({
                                bch2_bkey_val_to_text(&i->buf, i->c, k);
                                prt_newline(&i->buf);
                                bch2_trans_unlock(trans);
                                i->from = bpos_successor(iter.pos);
                                flush_buf(i);
                        }))) ?:
                i->ret;
}

static const struct file_operations btree_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_btree,
};
static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
                                       size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct btree_trans *trans;
        struct btree_iter iter;
        struct btree *b;
        ssize_t ret;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        ret = flush_buf(i);
        if (ret)
                return ret;

        if (bpos_eq(SPOS_MAX, i->from))
                return i->ret;

        trans = bch2_trans_get(i->c);
retry:
        bch2_trans_begin(trans);

        for_each_btree_node(trans, iter, i->id, i->from, 0, b, ret) {
                bch2_btree_node_to_text(&i->buf, i->c, b);
                i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
                        ? bpos_successor(b->key.k.p)
                        : b->key.k.p;

                ret = drop_locks_do(trans, flush_buf(i));
                if (ret)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);

        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_put(trans);

        if (!ret)
                ret = flush_buf(i);

        return ret ?: i->ret;
}

static const struct file_operations btree_format_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_btree_formats,
};
static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
                                       size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        return flush_buf(i) ?:
                bch2_trans_run(i->c,
                        for_each_btree_key(trans, iter, i->id, i->from,
                                           BTREE_ITER_PREFETCH|
                                           BTREE_ITER_ALL_SNAPSHOTS, k, ({
                                struct btree_path_level *l =
                                        &btree_iter_path(trans, &iter)->l[0];
                                struct bkey_packed *_k =
                                        bch2_btree_node_iter_peek(&l->iter, l->b);

                                if (bpos_gt(l->b->key.k.p, i->prev_node)) {
                                        bch2_btree_node_to_text(&i->buf, i->c, l->b);
                                        i->prev_node = l->b->key.k.p;
                                }

                                bch2_bfloat_to_text(&i->buf, l->b, _k);
                                bch2_trans_unlock(trans);
                                i->from = bpos_successor(iter.pos);
                                flush_buf(i);
                        }))) ?:
                i->ret;
}

static const struct file_operations bfloat_failed_debug_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_read_bfloat_failed,
};
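
/* Emit a human-readable summary of an in-memory (cached) btree node to @out. */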
static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
                                           struct btree *b)
{
        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 32);

        prt_printf(out, "%px btree=%s l=%u ",
                   b,
                   bch2_btree_id_str(b->c.btree_id),
                   b->c.level);
        prt_newline(out);

        printbuf_indent_add(out, 2);

        bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
        prt_newline(out);

        prt_printf(out, "flags: ");
        prt_tab(out);
        prt_bitflags(out, bch2_btree_node_flags, b->flags);
        prt_newline(out);

        prt_printf(out, "pcpu read locks: ");
        prt_tab(out);
        prt_printf(out, "%u", b->c.lock.readers != NULL);
        prt_newline(out);

        prt_printf(out, "written:");
        prt_tab(out);
        prt_printf(out, "%u", b->written);
        prt_newline(out);

        prt_printf(out, "writes blocked:");
        prt_tab(out);
        prt_printf(out, "%u", !list_empty_careful(&b->write_blocked));
        prt_newline(out);

        prt_printf(out, "will make reachable:");
        prt_tab(out);
        prt_printf(out, "%lx", b->will_make_reachable);
        prt_newline(out);

        prt_printf(out, "journal pin %px:", &b->writes[0].journal);
        prt_tab(out);
        prt_printf(out, "%llu", b->writes[0].journal.seq);
        prt_newline(out);

        prt_printf(out, "journal pin %px:", &b->writes[1].journal);
        prt_tab(out);
        prt_printf(out, "%llu", b->writes[1].journal.seq);
        prt_newline(out);

        printbuf_indent_sub(out, 2);
}
static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
                                            size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct bch_fs *c = i->c;
        bool done = false;
        ssize_t ret = 0;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        do {
                struct bucket_table *tbl;
                struct rhash_head *pos;
                struct btree *b;

                ret = flush_buf(i);
                if (ret)
                        return ret;

                rcu_read_lock();
                i->buf.atomic++;
                tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
                                          &c->btree_cache.table);
                if (i->iter < tbl->size) {
                        rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
                                bch2_cached_btree_node_to_text(&i->buf, c, b);
                        i->iter++;
                } else {
                        done = true;
                }
                --i->buf.atomic;
                rcu_read_unlock();
        } while (!done);

        if (i->buf.allocation_failure)
                ret = -ENOMEM;

        if (!ret)
                ret = flush_buf(i);

        return ret ?: i->ret;
}

static const struct file_operations cached_btree_nodes_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_cached_btree_nodes_read,
};
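
/*
 * Dump every currently-running btree transaction along with a backtrace of
 * the owning task. i->iter tracks the last pid emitted so the list walk can
 * be restarted safely after dropping btree_trans_lock.
 */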
static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
                                            size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct bch_fs *c = i->c;
        struct btree_trans *trans;
        ssize_t ret = 0;
        u32 seq;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;
restart:
        seqmutex_lock(&c->btree_trans_lock);
        list_for_each_entry(trans, &c->btree_trans_list, list) {
                struct task_struct *task = READ_ONCE(trans->locking_wait.task);

                if (!task || task->pid <= i->iter)
                        continue;

                closure_get(&trans->ref);
                seq = seqmutex_seq(&c->btree_trans_lock);
                seqmutex_unlock(&c->btree_trans_lock);

                ret = flush_buf(i);
                if (ret) {
                        closure_put(&trans->ref);
                        goto unlocked;
                }

                bch2_btree_trans_to_text(&i->buf, trans);

                prt_printf(&i->buf, "backtrace:");
                prt_newline(&i->buf);
                printbuf_indent_add(&i->buf, 2);
                bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
                printbuf_indent_sub(&i->buf, 2);
                prt_newline(&i->buf);

                i->iter = task->pid;

                closure_put(&trans->ref);

                if (!seqmutex_relock(&c->btree_trans_lock, seq))
                        goto restart;
        }
        seqmutex_unlock(&c->btree_trans_lock);
unlocked:
        if (i->buf.allocation_failure)
                ret = -ENOMEM;

        if (!ret)
                ret = flush_buf(i);

        return ret ?: i->ret;
}

static const struct file_operations btree_transactions_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_btree_transactions_read,
};
static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
                                      size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct bch_fs *c = i->c;
        bool done = false;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        while (!done) {
                err = flush_buf(i);
                if (err)
                        return err;

                if (!i->size)
                        break;

                done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
                i->iter++;
        }

        if (i->buf.allocation_failure)
                return -ENOMEM;

        return i->ret;
}

static const struct file_operations journal_pins_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_journal_pins_read,
};
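
/* Per-transaction-function statistics: memory use, duration, lock hold times. */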
static int btree_transaction_stats_open(struct inode *inode, struct file *file)
{
        struct bch_fs *c = inode->i_private;
        struct dump_iter *i;

        i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
        if (!i)
                return -ENOMEM;

        i->iter = 1;
        i->c    = c;
        i->buf  = PRINTBUF;
        file->private_data = i;

        return 0;
}

static int btree_transaction_stats_release(struct inode *inode, struct file *file)
{
        struct dump_iter *i = file->private_data;

        printbuf_exit(&i->buf);
        kfree(i);

        return 0;
}
static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf,
                                            size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct bch_fs *c = i->c;
        int err;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        while (1) {
                struct btree_transaction_stats *s = &c->btree_transaction_stats[i->iter];

                err = flush_buf(i);
                if (err)
                        return err;

                if (!i->size)
                        break;

                if (i->iter == ARRAY_SIZE(bch2_btree_transaction_fns) ||
                    !bch2_btree_transaction_fns[i->iter])
                        break;

                prt_printf(&i->buf, "%s: ", bch2_btree_transaction_fns[i->iter]);
                prt_newline(&i->buf);
                printbuf_indent_add(&i->buf, 2);

                mutex_lock(&s->lock);

                prt_printf(&i->buf, "Max mem used: %u", s->max_mem);
                prt_newline(&i->buf);

                prt_printf(&i->buf, "Transaction duration:");
                prt_newline(&i->buf);

                printbuf_indent_add(&i->buf, 2);
                bch2_time_stats_to_text(&i->buf, &s->duration);
                printbuf_indent_sub(&i->buf, 2);

                if (IS_ENABLED(CONFIG_BCACHEFS_LOCK_TIME_STATS)) {
                        prt_printf(&i->buf, "Lock hold times:");
                        prt_newline(&i->buf);

                        printbuf_indent_add(&i->buf, 2);
                        bch2_time_stats_to_text(&i->buf, &s->lock_hold_times);
                        printbuf_indent_sub(&i->buf, 2);
                }

                if (s->max_paths_text) {
                        prt_printf(&i->buf, "Maximum allocated btree paths (%u):", s->nr_max_paths);
                        prt_newline(&i->buf);

                        printbuf_indent_add(&i->buf, 2);
                        prt_str_indented(&i->buf, s->max_paths_text);
                        printbuf_indent_sub(&i->buf, 2);
                }

                mutex_unlock(&s->lock);

                printbuf_indent_sub(&i->buf, 2);
                prt_newline(&i->buf);
                i->iter++;
        }

        if (i->buf.allocation_failure)
                return -ENOMEM;

        return i->ret;
}

static const struct file_operations btree_transaction_stats_op = {
        .owner          = THIS_MODULE,
        .open           = btree_transaction_stats_open,
        .release        = btree_transaction_stats_release,
        .read           = btree_transaction_stats_read,
};
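
/*
 * Walk the running transactions and print any lock cycle found by
 * bch2_check_for_deadlock(), the same cycle detection used by the
 * deadlock-avoidance code.
 */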
static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
                                        size_t size, loff_t *ppos)
{
        struct dump_iter *i = file->private_data;
        struct bch_fs *c = i->c;
        struct btree_trans *trans;
        ssize_t ret = 0;
        u32 seq;

        i->ubuf = buf;
        i->size = size;
        i->ret  = 0;

        if (i->iter)
                goto out;
restart:
        seqmutex_lock(&c->btree_trans_lock);
        list_for_each_entry(trans, &c->btree_trans_list, list) {
                struct task_struct *task = READ_ONCE(trans->locking_wait.task);

                if (!task || task->pid <= i->iter)
                        continue;

                closure_get(&trans->ref);
                seq = seqmutex_seq(&c->btree_trans_lock);
                seqmutex_unlock(&c->btree_trans_lock);

                ret = flush_buf(i);
                if (ret) {
                        closure_put(&trans->ref);
                        goto out;
                }

                bch2_check_for_deadlock(trans, &i->buf);

                i->iter = task->pid;

                closure_put(&trans->ref);

                if (!seqmutex_relock(&c->btree_trans_lock, seq))
                        goto restart;
        }
        seqmutex_unlock(&c->btree_trans_lock);
out:
        if (i->buf.allocation_failure)
                ret = -ENOMEM;

        if (!ret)
                ret = flush_buf(i);

        return ret ?: i->ret;
}

static const struct file_operations btree_deadlock_ops = {
        .owner          = THIS_MODULE,
        .open           = bch2_dump_open,
        .release        = bch2_dump_release,
        .read           = bch2_btree_deadlock_read,
};
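
/* Create and tear down the per-filesystem debugfs hierarchy. */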
void bch2_fs_debug_exit(struct bch_fs *c)
{
        if (!IS_ERR_OR_NULL(c->fs_debug_dir))
                debugfs_remove_recursive(c->fs_debug_dir);
}

void bch2_fs_debug_init(struct bch_fs *c)
{
        struct btree_debug *bd;
        char name[100];

        if (IS_ERR_OR_NULL(bch_debug))
                return;

        snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
        c->fs_debug_dir = debugfs_create_dir(name, bch_debug);
        if (IS_ERR_OR_NULL(c->fs_debug_dir))
                return;

        debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir,
                            c->btree_debug, &cached_btree_nodes_ops);

        debugfs_create_file("btree_transactions", 0400, c->fs_debug_dir,
                            c->btree_debug, &btree_transactions_ops);

        debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
                            c->btree_debug, &journal_pins_ops);

        debugfs_create_file("btree_transaction_stats", 0400, c->fs_debug_dir,
                            c, &btree_transaction_stats_op);

        debugfs_create_file("btree_deadlock", 0400, c->fs_debug_dir,
                            c->btree_debug, &btree_deadlock_ops);

        c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir);
        if (IS_ERR_OR_NULL(c->btree_debug_dir))
                return;

        for (bd = c->btree_debug;
             bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
             bd++) {
                bd->id = bd - c->btree_debug;
                debugfs_create_file(bch2_btree_id_str(bd->id),
                                    0400, c->btree_debug_dir, bd,
                                    &btree_debug_ops);

                snprintf(name, sizeof(name), "%s-formats",
                         bch2_btree_id_str(bd->id));

                debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
                                    &btree_format_debug_ops);

                snprintf(name, sizeof(name), "%s-bfloat-failed",
                         bch2_btree_id_str(bd->id));

                debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
                                    &bfloat_failed_debug_ops);
        }
}
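
/*
 * Example usage, a sketch assuming debugfs is mounted at the conventional
 * /sys/kernel/debug and <uuid> is this filesystem's user-visible UUID:
 *
 *   cat /sys/kernel/debug/bcachefs/<uuid>/btree_transactions
 *   cat /sys/kernel/debug/bcachefs/<uuid>/btrees/extents
 *   cat /sys/kernel/debug/bcachefs/<uuid>/btrees/extents-formats
 */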
#endif /* CONFIG_DEBUG_FS */

void bch2_debug_exit(void)
{
        if (!IS_ERR_OR_NULL(bch_debug))
                debugfs_remove_recursive(bch_debug);
}

int __init bch2_debug_init(void)
{
        bch_debug = debugfs_create_dir("bcachefs", NULL);
        return 0;
}