// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "backpointers.h"
#include "alloc_background.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "fs-common.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "logged_ops.h"
#include "rebalance.h"
#include "sb-downgrade.h"
#include "subvolume.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
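
/*
 * The alloc btrees (alloc, backpointers, need_discard, freespace,
 * bucket_gens) hold state that can be reconstructed from the rest of the
 * filesystem; recovery treats them specially, e.g. for -o reconstruct_alloc:
 */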
static bool btree_id_is_alloc(enum btree_id id)
{
	switch (id) {
	case BTREE_ID_alloc:
	case BTREE_ID_backpointers:
	case BTREE_ID_need_discard:
	case BTREE_ID_freespace:
	case BTREE_ID_bucket_gens:
		return true;
	default:
		return false;
	}
}

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (!btree_id_is_alloc(keys->d[src].btree_id))
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/*
 * Btree node pointers have a field to stash a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}
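
/*
 * Advance the replay cursor to @seq, dropping our pins on journal entries
 * that have now been fully replayed so journal reclaim can reuse that space:
 */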
static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}
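
/*
 * Replay one key from the journal into the btree: the transaction reuses the
 * key's original journal sequence number instead of taking a new journal
 * reservation:
 */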
static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	unsigned update_flags = BTREE_TRIGGER_NORUN;
	int ret;

	if (k->overwritten)
		return 0;

	trans->journal_res.seq = k->journal_seq;

	/*
	 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
	 * keep the key cache coherent with the underlying btree. Nothing
	 * besides the allocator is doing updates yet so we don't need key cache
	 * coherency for non-alloc btrees, and key cache fills for snapshots
	 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
	 * the snapshots recovery pass runs.
	 */
	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;
	else
		update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}
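
/*
 * Replay all keys from the journal into the btrees, in two phases: first in
 * sorted (btree) order for locality of access, then any keys that couldn't
 * be replayed that way without risking journal deadlock are replayed in
 * journal order:
 */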
static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	DARRAY(struct journal_key *) keys_sorted = { 0 };
	struct journal *j = &c->journal;
	u64 start_seq = c->journal_replay_seq_start;
	u64 end_seq = c->journal_replay_seq_end;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
				   keys->nr, start_seq, end_seq);
	if (ret)
		goto err;

	BUG_ON(!atomic_read(&keys->ref));

	/*
	 * First, attempt to replay keys in sorted order. This is more
	 * efficient - better locality of btree access - but some might fail if
	 * that would cause a journal deadlock.
	 */
	for (size_t i = 0; i < keys->nr; i++) {
		cond_resched();

		struct journal_key *k = keys->d + i;

		/* Skip fastpath if we're low on space in the journal */
		ret = c->journal.watermark ? -1 :
			commit_do(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_journal_reclaim|
				  (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0),
			     bch2_journal_replay_key(trans, k));
		BUG_ON(!ret && !k->overwritten);
		if (ret) {
			ret = darray_push(&keys_sorted, k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now, replay any remaining keys in the order in which they appear in
	 * the journal, unpinning those journal entries as we go:
	 */
	sort(keys_sorted.data, keys_sorted.nr,
	     sizeof(keys_sorted.data[0]),
	     journal_sort_seq_cmp, NULL);

	darray_for_each(keys_sorted, kp) {
		cond_resched();

		struct journal_key *k = *kp;

		replay_now_at(j, k->journal_seq);

		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc|
				(!k->allocated
				 ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim
				 : 0),
			     bch2_journal_replay_key(trans, k));
		bch_err_msg(c, ret, "while replaying key at btree %s level %u:",
			    bch2_btree_id_str(k->btree_id), k->level);
		if (ret)
			goto err;

		BUG_ON(!k->overwritten);
	}

	/*
	 * We need to put our btree_trans before calling flush_all_pins(), since
	 * that will use a btree_trans internally
	 */
	bch2_trans_put(trans);
	trans = NULL;

	if (!c->opts.keep_journal)
		bch2_journal_keys_put_initial(c);

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);

	bch2_journal_log_msg(c, "journal replay finished");
err:
	if (trans)
		bch2_trans_put(trans);
	darray_exit(&keys_sorted);
	return ret;
}

/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
			if (ret)
				return ret;
		}

		r = bch2_btree_id_root(c, entry->btree_id);

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, (struct bkey_i *) entry->start);
			r->error = 0;
		} else {
			r->error = -BCH_ERR_btree_node_read_error;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}
		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
		}
		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}
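
/*
 * Replay the non-btree-update journal entries - btree roots, usage counters,
 * blacklists and clock entries - from either the superblock clean section or
 * the journal itself:
 */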
static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	if (clean) {
		for (struct jset_entry *entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			int ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;
			if (!i || i->ignore)
				continue;
			vstruct_for_each(&i->j, entry) {
				int ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);
	return 0;
}

/* sb clean section: */

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c,
				   btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   btree_root_bkey_invalid,
				   "invalid btree root %s",
				   bch2_btree_id_str(i));
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			fsck_err(c,
				 btree_root_read_error,
				 "error reading btree root %s",
				 bch2_btree_id_str(i));
			if (btree_id_is_alloc(i))
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			ret = 0;
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->b) {
			r->alive = false;
			r->level = 0;
			bch2_btree_root_alloc(c, i);
		}
	}
fsck_err:
	return ret;
}
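
/*
 * Create the initial snapshot/subvolume keys for a new filesystem, or one
 * being upgraded to subvolume support: one snapshot tree, one root snapshot,
 * one root subvolume:
 */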
static int bch2_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot_tree root_tree;
	struct bkey_i_snapshot root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_tree_init(&root_tree.k_i);
	root_tree.k.p.offset = 1;
	root_tree.v.master_subvol = cpu_to_le32(1);
	root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags = 0;
	root_snapshot.v.parent = 0;
	root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.tree = cpu_to_le32(1);
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags = 0;
	root_volume.v.snapshot = cpu_to_le32(U32_MAX);
	root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
	bch_err_fn(c, ret);
	return ret;
}

static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* set bi_subvol on root inode */
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
				__bch2_fs_upgrade_for_subvolumes(trans));
	bch_err_fn(c, ret);
	return ret;
}

const char * const bch2_recovery_passes[] = {
#define x(_fn, ...)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, c->opts.norecovery);
}
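
/*
 * Once the passes that only read from the journal keys buffer are done,
 * compact the buffer and flag that transaction commits may go read-write:
 */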
static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except
	 * for setting journal_key->overwritten), since it will be accessed by
	 * multiple threads:
	 */
	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean)
		return bch2_fs_read_write_early(c);
	return 0;
}

struct recovery_pass_fn {
	int		(*fn)(struct bch_fs *);
	unsigned	when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when)	{ .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};
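
/*
 * Recovery pass numbers are stored in the superblock in a stable numbering
 * that doesn't change when passes are reordered; map between the in-memory
 * and stable encodings:
 */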
u64 bch2_recovery_passes_to_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

u64 bch2_recovery_passes_from_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

static bool check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_version = bcachefs_metadata_version_current;
	unsigned latest_compatible = min(latest_version,
					 bch2_latest_compatible_version(c->sb.version));
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = 0;

	if (old_version < bcachefs_metadata_required_upgrade_below) {
		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
		    latest_compatible < bcachefs_metadata_required_upgrade_below)
			new_version = latest_version;
		else
			new_version = latest_compatible;
	} else {
		switch (c->opts.version_upgrade) {
		case BCH_VERSION_UPGRADE_compatible:
			new_version = latest_compatible;
			break;
		case BCH_VERSION_UPGRADE_incompatible:
			new_version = latest_version;
			break;
		case BCH_VERSION_UPGRADE_none:
			new_version = min(old_version, latest_version);
			break;
		}
	}

	if (new_version > old_version) {
		struct printbuf buf = PRINTBUF;

		if (old_version < bcachefs_metadata_required_upgrade_below)
			prt_str(&buf, "Version upgrade required:\n");

		if (old_version != c->sb.version) {
			prt_str(&buf, "Version upgrade from ");
			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
			prt_str(&buf, " to ");
			bch2_version_to_text(&buf, c->sb.version);
			prt_str(&buf, " incomplete\n");
		}

		prt_printf(&buf, "Doing %s version upgrade from ",
			   BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
			   ? "incompatible" : "compatible");
		bch2_version_to_text(&buf, old_version);
		prt_str(&buf, " to ");
		bch2_version_to_text(&buf, new_version);
		prt_newline(&buf);

		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		__le64 passes = ext->recovery_passes_required[0];
		bch2_sb_set_upgrade(c, old_version, new_version);
		passes = ext->recovery_passes_required[0] & ~passes;

		if (passes) {
			prt_str(&buf, " running recovery passes: ");
			prt_bitflags(&buf, bch2_recovery_passes,
				     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
		}

		bch_info(c, "%s", buf.buf);

		bch2_sb_upgrade(c, new_version);

		printbuf_exit(&buf);
		return true;
	}

	return false;
}

u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
		if (recovery_pass_fns[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}

static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;

	if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
		return false;
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}

static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;
	int ret;

	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
			   bch2_recovery_passes[pass]);
	ret = p->fn(c);
	if (ret)
		return ret;
	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_CONT " done\n");

	return 0;
}
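
/*
 * Run all applicable passes from c->curr_recovery_pass onwards; a pass may
 * request that an earlier pass be rerun by resetting c->curr_recovery_pass
 * or returning a restart_recovery error:
 */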
static int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
			unsigned pass = c->curr_recovery_pass;

			ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
			if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
			    (ret && c->curr_recovery_pass < pass))
				continue;
			if (ret)
				break;

			c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
		}
		c->curr_recovery_pass++;
		c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);
	}

	return ret;
}

int bch2_run_online_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
		struct recovery_pass_fn *p = recovery_pass_fns + i;

		if (!(p->when & PASS_ONLINE))
			continue;

		ret = bch2_run_recovery_pass(c, i);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
			i = c->curr_recovery_pass;
			continue;
		}
		if (ret)
			break;
	}

	return ret;
}
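
/*
 * Main recovery path, called at mount time: read the superblock clean
 * section and/or the journal, replay, then run whichever recovery and fsck
 * passes are needed:
 */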
int bch2_fs_recovery(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 last_seq = 0, blacklist_seq, journal_seq;
	int ret = 0;

	if (c->sb.clean) {
		clean = bch2_read_superblock_clean(c);
		ret = PTR_ERR_OR_ZERO(clean);
		if (ret)
			goto err;

		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	} else {
		bch_info(c, "recovering from unclean shutdown");
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");
		ret = -EINVAL;
		goto err;
	}

	if (!c->opts.nochanges) {
		mutex_lock(&c->sb_lock);
		bool write_sb = false;

		struct bch_sb_field_ext *ext =
			bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
		if (!ext) {
			ret = -BCH_ERR_ENOSPC_sb;
			mutex_unlock(&c->sb_lock);
			goto err;
		}

		if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
			ext->recovery_passes_required[0] |=
				cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
			write_sb = true;
		}

		u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
		if (sb_passes) {
			struct printbuf buf = PRINTBUF;
			prt_str(&buf, "superblock requires following recovery passes to be run:\n ");
			prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
			bch_info(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		if (bch2_check_version_downgrade(c)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "Version downgrade required:");

			__le64 passes = ext->recovery_passes_required[0];
			bch2_sb_set_downgrade(c,
					BCH_VERSION_MINOR(bcachefs_metadata_version_current),
					BCH_VERSION_MINOR(c->sb.version));
			passes = ext->recovery_passes_required[0] & ~passes;
			if (passes) {
				prt_str(&buf, "\n running recovery passes: ");
				prt_bitflags(&buf, bch2_recovery_passes,
					     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
			}

			bch_info(c, "%s", buf.buf);
			printbuf_exit(&buf);
			write_sb = true;
		}

		if (check_version_upgrade(c))
			write_sb = true;

		if (write_sb)
			bch2_write_super(c);

		c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
		mutex_unlock(&c->sb_lock);
	}

	if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

	if (c->opts.fsck)
		set_bit(BCH_FS_fsck_running, &c->flags);

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		/*
		 * note: cmd_list_journal needs the blacklist table fully up to date so
		 * it can asterisk ignored journal entries:
		 */
		if (c->opts.read_journal_only)
			goto out;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (*i && !(*i)->ignore) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
					clean_but_journal_not_empty,
					"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c,
				    dirty_but_no_journal_entries,
				    "no journal entries found");
			if (clean)
				goto use_clean;

			genradix_for_each_reverse(&c->journal_entries, iter, i)
				if (*i) {
					last_journal_entry = &(*i)->j;
					(*i)->ignore = false;
					/*
					 * This was probably a NO_FLUSH entry,
					 * so last_seq was garbage - but we know
					 * we're only using a single journal
					 * entry, set it here:
					 */
					(*i)->j.last_seq = (*i)->j.seq;
					break;
				}
		}

		ret = bch2_journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = bch2_verify_superblock_clean(c, &clean,
							   last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = -BCH_ERR_fsck_repair_impossible;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	c->journal_replay_seq_start = last_seq;
	c->journal_replay_seq_end = blacklist_seq - 1;

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
					   blacklist_seq, journal_seq) ?:
			bch2_journal_seq_blacklist_add(c,
						       blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
				   journal_seq, last_seq, blacklist_seq - 1) ?:
		bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	if (c->opts.reconstruct_alloc)
		bch2_journal_log_msg(c, "dropping alloc info");

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	ret = bch2_run_recovery_passes(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_fsck_running, &c->flags);

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_errors_fixed, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags) &&
	    !test_bit(BCH_FS_error, &c->flags)) {
		bch2_flush_fsck_errs(c);

		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
		clear_bit(BCH_FS_errors_fixed, &c->flags);

		c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

		ret = bch2_run_recovery_passes(c);
		if (ret)
			goto err;

		if (test_bit(BCH_FS_errors_fixed, &c->flags) ||
		    test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
			bch_err(c, "Second fsck run was not clean");
			set_bit(BCH_FS_errors_not_fixed, &c->flags);
		}

		set_bit(BCH_FS_errors_fixed, &c->flags);
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	bool write_sb = false;

	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags) &&
	    !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags)) {
		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		if (ext &&
		    (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
		     !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) {
			memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required));
			memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
			write_sb = true;
		}
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		struct printbuf buf = PRINTBUF;
		bch2_version_to_text(&buf, c->sb.version_min);
		bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf);
		printbuf_exit(&buf);

		ret = bch2_fs_read_write_early(c) ?:
			bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal &&
	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
		bch2_journal_keys_put_initial(c);
	kfree(clean);

	if (!ret &&
	    test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
	    !c->opts.nochanges) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);
	}

	bch_err_fn(c, ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}
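
/*
 * Format-time initialization: write out the initial btree roots, superblock
 * and journal, create the root directory and lost+found, and mark the
 * filesystem initialized:
 */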
int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	bch2_check_version_downgrade(c);

	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
		bch2_sb_upgrade(c, bcachefs_metadata_version_current);
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
	set_bit(BCH_FS_may_go_rw, &c->flags);

	for (unsigned i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	for_each_member_device(c, ca)
		bch2_dev_usage_init(ca);

	ret = bch2_fs_journal_alloc(c);
	if (ret)
		goto err;

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	ret = bch2_trans_mark_dev_sbs(c);
	bch_err_msg(c, ret, "marking superblocks");
	if (ret)
		goto err;

	for_each_online_member(c, ca)
		ca->new_fs_bucket_idx = 0;

	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	ret = bch2_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	ret = bch2_snapshots_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
	root_inode.bi_inum = BCACHEFS_ROOT_INO;
	root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
	bch_err_msg(c, ret, "creating root directory");
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	bch_err_msg(c, ret, "creating lost+found");
	if (ret)
		goto err;

	c->recovery_pass_done = ARRAY_SIZE(recovery_pass_fns) - 1;

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	bch_err_msg(c, ret, "writing first journal entry");
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	bch_err_fn(c, ret);
	return ret;
}