// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_cache.h"
#include "btree_journal_iter.h"
#include "journal_io.h"

#include <linux/sort.h>

/*
 * For managing keys we read from the journal: until journal replay has
 * finished, normal btree lookups need to be able to find and return keys from
 * the journal where they overwrite what's in the btree, so we have a special
 * iterator and operations for the regular btree iter code to use:
 */
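
/*
 * Journal keys are kept sorted by (btree_id, level, pos); these comparison
 * helpers define that ordering:
 */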
static int __journal_key_cmp(enum btree_id l_btree_id,
			     unsigned l_level,
			     struct bpos l_pos,
			     const struct journal_key *r)
{
	return (cmp_int(l_btree_id, r->btree_id) ?:
		cmp_int(l_level, r->level) ?:
		bpos_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
{
	return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}
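
/*
 * journal_keys is a gap buffer: a sorted array with a gap of empty slots that
 * gets moved to wherever we're inserting, so repeated inserts near the same
 * position don't have to memmove the whole array. For example, with size = 8,
 * nr = 6, gap = 4:
 *
 *   array pos:  0   1   2   3   4   5   6   7
 *              [A] [B] [C] [D] [ ] [ ] [E] [F]
 *                               \_gap_/
 *
 * idx_to_pos() maps a logical index (0..nr-1) to an array position by
 * skipping over the gap: logical index 4 (key E) is at array position 6.
 */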
static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
	size_t gap_size = keys->size - keys->nr;

	return idx >= keys->gap
		? idx + gap_size
		: idx;
}

static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
	return keys->data + idx_to_pos(keys, idx);
}

static size_t __bch2_journal_key_search(struct journal_keys *keys,
					enum btree_id id, unsigned level,
					struct bpos pos)
{
	size_t l = 0, r = keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < keys->nr &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);

	return l;
}
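
/*
 * __bch2_journal_key_search() is a lower bound binary search - it returns the
 * logical (gapless) index of the first key >= the search key; this wrapper
 * converts that to an array position:
 */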
static size_t bch2_journal_key_search(struct journal_keys *keys,
				      enum btree_id id, unsigned level,
				      struct bpos pos)
{
	return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
}

/* Returns first non-overwritten key >= search key: */
struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos,
					   struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;

	BUG_ON(*idx > keys->nr);
search:
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while (*idx &&
	       __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) {
		--(*idx);
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}
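
	/*
	 * *idx is a cursor the caller caches across calls, letting us usually
	 * skip the binary search: the loops above and below revalidate it. If
	 * correcting it takes too many steps (iters == 10), the cursor is
	 * badly stale and we fall back to a fresh search:
	 */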
	while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
		if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
			return NULL;

		if (k->overwritten) {
			(*idx)++;
			continue;
		}

		if (__journal_key_cmp(btree_id, level, pos, k) <= 0)
			return k->k;

		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	return NULL;
}

struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos)
{
	size_t idx = 0;

	return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
}

static void journal_iter_verify(struct journal_iter *iter)
{
	struct journal_keys *keys = iter->keys;
	size_t gap_size = keys->size - keys->nr;

	BUG_ON(iter->idx >= keys->gap &&
	       iter->idx <  keys->gap + gap_size);

	if (iter->idx < keys->size) {
		struct journal_key *k = keys->data + iter->idx;

		int cmp = cmp_int(k->btree_id, iter->btree_id) ?:
			  cmp_int(k->level, iter->level);
		BUG_ON(cmp < 0);
	}
}

static void journal_iters_fix(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	/* The key we just inserted is immediately before the gap: */
	size_t gap_end = keys->gap + (keys->size - keys->nr);
	struct journal_key *new_key = &keys->data[keys->gap - 1];
	struct journal_iter *iter;

	/*
	 * If an iterator points one after the key we just inserted, decrement
	 * the iterator so it points at the key we just inserted - if the
	 * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
	 * handle its position being off by one correctly:
	 */
	list_for_each_entry(iter, &c->journal_iters, list) {
		journal_iter_verify(iter);
		if (iter->idx		== gap_end &&
		    new_key->btree_id	== iter->btree_id &&
		    new_key->level	== iter->level)
			iter->idx = keys->gap - 1;
		journal_iter_verify(iter);
	}
}
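
/*
 * When the gap moves, keys between the old and new gap positions shift by
 * gap_size array slots; fix up iterators to match by converting their index
 * to a gapless index and re-adding the gap at its new position:
 */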
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t gap_size = keys->size - keys->nr;

	list_for_each_entry(iter, &c->journal_iters, list) {
		if (iter->idx > old_gap)
			iter->idx -= gap_size;
		if (iter->idx >= new_gap)
			iter->idx += gap_size;
	}
}

int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U32_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_rw, &c->flags));

	if (idx < keys->size &&
	    journal_key_cmp(&n, &keys->data[idx]) == 0) {
		if (keys->data[idx].allocated)
			kfree(keys->data[idx].k);
		keys->data[idx] = n;
		return 0;
	}

	if (idx > keys->gap)
		idx -= keys->size - keys->nr;

	size_t old_gap = keys->gap;
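
	/*
	 * No room left: reallocate at double the size. A full array has
	 * gap_size == 0, so the keys can be copied as-is; the new free space
	 * then becomes the gap, at the end of the new array:
	 */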
	if (keys->nr == keys->size) {
		journal_iters_move_gap(c, old_gap, keys->size);
		old_gap = keys->size;

		struct journal_keys new_keys = {
			.nr	= keys->nr,
			.size	= max_t(size_t, keys->size, 8) * 2,
		};

		new_keys.data = kvmalloc_array(new_keys.size, sizeof(new_keys.data[0]), GFP_KERNEL);
		if (!new_keys.data) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -BCH_ERR_ENOMEM_journal_key_insert;
		}

		/* Since @keys was full, there was no gap: */
		memcpy(new_keys.data, keys->data, sizeof(keys->data[0]) * keys->nr);
		kvfree(keys->data);
		keys->data	= new_keys.data;
		keys->nr	= new_keys.nr;
		keys->size	= new_keys.size;

		/* And now the gap is at the end: */
		keys->gap	= keys->nr;
	}

	journal_iters_move_gap(c, old_gap, idx);

	move_gap(keys, idx);

	keys->nr++;
	keys->data[keys->gap++] = n;

	journal_iters_fix(c);

	return 0;
}

/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've got RW, as journal_keys is at that point used by multiple
 * threads:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -BCH_ERR_ENOMEM_journal_key_insert;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}

int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}

bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree,
				 unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &trans->c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (!trans->journal_replay_not_finished)
		return false;

	return (idx < keys->size &&
		keys->data[idx].btree_id	== btree &&
		keys->data[idx].level		== level &&
		bpos_eq(keys->data[idx].k->k.p, pos) &&
		bkey_deleted(&keys->data[idx].k->k));
}

void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->size &&
	    keys->data[idx].btree_id	== btree &&
	    keys->data[idx].level	== level &&
	    bpos_eq(keys->data[idx].k->k.p, pos))
		keys->data[idx].overwritten = true;
}
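
/*
 * Advancing to the next logical key means stepping over the gap if we land
 * on it:
 */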
static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->size) {
		iter->idx++;
		if (iter->idx == iter->keys->gap)
			iter->idx += iter->keys->size - iter->keys->nr;
	}
}

static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
	journal_iter_verify(iter);

	while (iter->idx < iter->keys->size) {
		struct journal_key *k = iter->keys->data + iter->idx;

		int cmp = cmp_int(k->btree_id, iter->btree_id) ?:
			  cmp_int(k->level, iter->level);
		if (cmp > 0)
			break;
		BUG_ON(cmp);

		if (!k->overwritten)
			return bkey_i_to_s_c(k->k);

		bch2_journal_iter_advance(iter);
	}

	return bkey_s_c_null;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);

	journal_iter_verify(iter);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	if (bpos_eq(iter->pos, SPOS_MAX))
		iter->at_end = true;
	else
		iter->pos = bpos_successor(iter->pos);
}
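
/*
 * Prefetch the nodes pointed to by the keys ahead of the iterator. During
 * early recovery (before BCH_FS_started) we prefetch aggressively - up to 16
 * nodes at level 1; once the filesystem is up we only read ahead a couple of
 * leaf nodes:
 */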
static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter)
{
	struct btree_and_journal_iter iter = *_iter;
	struct bch_fs *c = iter.trans->c;
	unsigned level = iter.journal.level;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (level > 1 ? 0 :  2)
		: (level > 1 ? 1 : 16);

	iter.prefetch = false;
	bch2_bkey_buf_init(&tmp);

	while (nr--) {
		bch2_btree_and_journal_iter_advance(&iter);
		struct bkey_s_c k = bch2_btree_and_journal_iter_peek(&iter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		bch2_btree_node_prefetch(iter.trans, NULL, tmp.k, iter.journal.btree_id, level - 1);
	}

	bch2_bkey_buf_exit(&tmp, c);
}
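
/*
 * Merge the btree node iterator with the journal iterator: when both have a
 * key at the same position the journal key takes precedence, since keys in
 * the journal are newer than what's in the btree:
 */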
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret;

	if (iter->prefetch && iter->journal.level)
		btree_and_journal_iter_prefetch(iter);
again:
	if (iter->at_end)
		return bkey_s_c_null;

	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
	       bpos_lt(btree_k.k->p, iter->pos))
		bch2_journal_iter_advance_btree(iter);

	if (iter->trans->journal_replay_not_finished)
		while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
		       bpos_lt(journal_k.k->p, iter->pos))
			bch2_journal_iter_advance(&iter->journal);

	ret = journal_k.k &&
		(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
		? journal_k
		: btree_k;

	if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
		ret = bkey_s_c_null;

	if (ret.k) {
		iter->pos = ret.k->p;
		if (bkey_deleted(ret.k)) {
			bch2_btree_and_journal_iter_advance(iter);
			goto again;
		}
	} else {
		iter->pos = SPOS_MAX;
		iter->at_end = true;
	}

	return ret;
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
						  struct btree_and_journal_iter *iter,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->trans	= trans;
	iter->b		= b;
	iter->node_iter	= node_iter;
	iter->pos	= b->data->min_key;
	iter->at_end	= false;
	INIT_LIST_HEAD(&iter->journal.list);

	if (trans->journal_replay_not_finished) {
		bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos);
		if (!test_bit(BCH_FS_may_go_rw, &trans->c->flags))
			list_add(&iter->journal.list, &trans->c->journal_iters);
	}
}

/*
 * This version is used by btree_gc before the filesystem has gone RW and
 * multithreaded, so it uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
						struct btree_and_journal_iter *iter,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key);
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct bch_fs *c)
{
	struct journal_replay **i;
	struct genradix_iter iter;

	genradix_for_each(&c->journal_entries, iter, i)
		kvfree(*i);
	genradix_free(&c->journal_entries);
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return journal_key_cmp(l, r) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_put(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	BUG_ON(atomic_read(&keys->ref) <= 0);

	if (!atomic_dec_and_test(&keys->ref))
		return;

	move_gap(keys, keys->nr);

	darray_for_each(*keys, i)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->data);
	keys->data = NULL;
	keys->nr = keys->gap = keys->size = 0;

	bch2_journal_entries_free(c);
}
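
/*
 * Sort, then dedup: since journal_sort_key_cmp() sorts older versions of a
 * key before newer ones, keeping only the last key in each run of equal keys
 * keeps the newest version:
 */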
static void __journal_keys_sort(struct journal_keys *keys)
{
	sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);

	struct journal_key *dst = keys->data;

	darray_for_each(*keys, src) {
		if (src + 1 < &darray_top(*keys) &&
		    !journal_key_cmp(src, src + 1))
			continue;

		*dst++ = *src;
	}

	keys->nr = dst - keys->data;
}

int bch2_journal_keys_sort(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct journal_replay *i, **_i;
	struct journal_keys *keys = &c->journal_keys;
	size_t nr_read = 0;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		cond_resched();

		for_each_jset_key(k, entry, &i->j) {
			struct journal_key n = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq),
				.journal_offset	= k->_data - i->j._data,
			};
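
			/*
			 * darray_push() fails if it can't grow the
			 * allocation; try sorting and deduping in place to
			 * free up room in the buffer we already have:
			 */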
			if (darray_push(keys, n)) {
				__journal_keys_sort(keys);

				if (keys->nr * 8 > keys->size * 7) {
					bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu keys at seq %llu",
						keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq));
					return -BCH_ERR_ENOMEM_journal_keys_sort;
				}

				BUG_ON(darray_push(keys, n));
			}

			nr_read++;
		}
	}

	__journal_keys_sort(keys);
	keys->gap = keys->nr;

	bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr);
	return 0;
}
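
/*
 * Drop all journal keys for @btree at levels [level_min, level_max] whose
 * positions fall within [start, end], compacting the remaining keys in place:
 */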
void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree,
				  unsigned level_min, unsigned level_max,
				  struct bpos start, struct bpos end)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t dst = 0;

	move_gap(keys, keys->nr);

	darray_for_each(*keys, i)
		if (!(i->btree_id == btree &&
		      i->level >= level_min &&
		      i->level <= level_max &&
		      bpos_ge(i->k->k.p, start) &&
		      bpos_le(i->k->k.p, end)))
			keys->data[dst++] = *i;
	keys->nr = keys->gap = dst;
}