// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bset.h"
#include "btree_journal_iter.h"
#include "journal_io.h"

#include <linux/sort.h>
/*
 * For managing keys we read from the journal: until journal replay works,
 * normal btree lookups need to be able to find and return keys from the
 * journal where they overwrite what's in the btree, so we have a special
 * iterator and operations for the regular btree iter code to use:
 */
static int __journal_key_cmp(enum btree_id	l_btree_id,
			     unsigned		l_level,
			     struct bpos	l_pos,
			     const struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bpos_cmp(l_pos,	r->k->k.p));
}
static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
{
	return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}
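
/*
 * journal_keys is a gap buffer: a sorted array with a gap of unused slots at
 * keys->gap, so insertions near the gap only require moving keys between the
 * old and new gap positions. "idx" below refers to the compacted, gap-less
 * index space [0, keys->nr); idx_to_pos() converts it to a position in the
 * backing array [0, keys->size):
 */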
static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
	size_t gap_size = keys->size - keys->nr;

	return idx >= keys->gap
		? idx + gap_size
		: idx;
}
static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
	return keys->d + idx_to_pos(keys, idx);
}
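
/* Binary search; returns the index of the first key >= the search key: */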
static size_t __bch2_journal_key_search(struct journal_keys *keys,
					enum btree_id id, unsigned level,
					struct bpos pos)
{
	size_t l = 0, r = keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < keys->nr &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);

	return l;
}
static size_t bch2_journal_key_search(struct journal_keys *keys,
				      enum btree_id id, unsigned level,
				      struct bpos pos)
{
	return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
}
/* Returns first non-overwritten key >= search key: */
struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos,
					   struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;

	BUG_ON(*idx > keys->nr);
search:
	/* @idx is a cached position from the previous call; 0 means unset: */
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while (*idx &&
	       __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) {
		--(*idx);
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
		if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
			return NULL;

		if (k->overwritten) {
			(*idx)++;
			continue;
		}

		if (__journal_key_cmp(btree_id, level, pos, k) <= 0)
			return k->k;

		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	return NULL;
}
struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos)
{
	size_t idx = 0;

	return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
}
static void journal_iters_fix(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	/* The key we just inserted is immediately before the gap: */
	size_t gap_end = keys->gap + (keys->size - keys->nr);
	struct btree_and_journal_iter *iter;

	/*
	 * If an iterator points one after the key we just inserted, decrement
	 * the iterator so it points at the key we just inserted - if the
	 * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
	 * handle its position being off by one correctly:
	 */
	list_for_each_entry(iter, &c->journal_iters, journal.list)
		if (iter->journal.idx == gap_end)
			iter->journal.idx = keys->gap - 1;
}
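
/*
 * When the gap moves, array positions between the old and new gap shift by
 * the gap size; update live iterators so they keep pointing at the same key:
 */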
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t gap_size = keys->size - keys->nr;

	list_for_each_entry(iter, &c->journal_iters, list) {
		if (iter->idx > old_gap)
			iter->idx -= gap_size;
		if (iter->idx >= new_gap)
			iter->idx += gap_size;
	}
}
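
/*
 * Insert @k into the array of journal keys, taking ownership of @k: on
 * success it's freed when journal_keys is torn down; on failure the caller
 * still owns it:
 */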
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U32_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_rw, &c->flags));

	if (idx < keys->size &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (idx > keys->gap)
		idx -= keys->size - keys->nr;

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr	= keys->nr,
			.size	= max_t(size_t, keys->size, 8) * 2,
		};

		new_keys.d = kvmalloc_array(new_keys.size, sizeof(new_keys.d[0]), GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -BCH_ERR_ENOMEM_journal_key_insert;
		}

		/* Since @keys was full, there was no gap: */
		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		keys->d		= new_keys.d;
		keys->nr	= new_keys.nr;
		keys->size	= new_keys.size;

		/* And now the gap is at the end: */
		keys->gap	= keys->nr;
	}

	journal_iters_move_gap(c, keys->gap, idx);

	move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
	keys->gap = idx;

	keys->nr++;
	keys->d[keys->gap++] = n;

	journal_iters_fix(c);

	return 0;
}
/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've got RW, as journal_keys is at that point used by multiple
 * threads:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -BCH_ERR_ENOMEM_journal_key_insert;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}
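
/* Mark a journal key as overwritten, so lookups and iterators skip it: */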
void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->size &&
	    keys->d[idx].btree_id	== btree &&
	    keys->d[idx].level		== level &&
	    bpos_eq(keys->d[idx].k->k.p, pos))
		keys->d[idx].overwritten = true;
}
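
/* Advance to the next array position, stepping over the gap: */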
static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->size) {
		iter->idx++;
		if (iter->idx == iter->keys->gap)
			iter->idx += iter->keys->size - iter->keys->nr;
	}
}
static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->keys->d + iter->idx;

	while (k < iter->keys->d + iter->keys->size &&
	       k->btree_id	== iter->btree_id &&
	       k->level		== iter->level) {
		if (!k->overwritten)
			return bkey_i_to_s_c(k->k);

		bch2_journal_iter_advance(iter);
		k = iter->keys->d + iter->idx;
	}

	return bkey_s_c_null;
}
static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}
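
/* Position @iter at the first journal key >= @pos in the given btree/level: */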
static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);
}
static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}
static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	if (bpos_eq(iter->pos, SPOS_MAX))
		iter->at_end = true;
	else
		iter->pos = bpos_successor(iter->pos);
}
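
/*
 * Return the next key >= iter->pos from either the btree node or the journal,
 * preferring the journal version when both exist at the same position:
 */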
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c btree_k, journal_k, ret;
again:
	if (iter->at_end)
		return bkey_s_c_null;

	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
	       bpos_lt(btree_k.k->p, iter->pos))
		bch2_journal_iter_advance_btree(iter);

	while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
	       bpos_lt(journal_k.k->p, iter->pos))
		bch2_journal_iter_advance(&iter->journal);

	ret = journal_k.k &&
		(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
		? journal_k
		: btree_k;

	if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
		ret = bkey_s_c_null;

	if (ret.k) {
		iter->pos = ret.k->p;
		if (bkey_deleted(ret.k)) {
			bch2_btree_and_journal_iter_advance(iter);
			goto again;
		}
	} else {
		iter->pos = SPOS_MAX;
		iter->at_end = true;
	}

	return ret;
}
void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}
void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						  struct bch_fs *c,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	iter->node_iter = node_iter;
	bch2_journal_iter_init(c, &iter->journal, b->c.btree_id, b->c.level, pos);
	INIT_LIST_HEAD(&iter->journal.list);
	iter->pos = b->data->min_key;
	iter->at_end = false;
}
/*
 * this version is used by btree_gc before filesystem has gone RW and
 * multithreaded, so uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(iter, c, b, node_iter, b->data->min_key);
	list_add(&iter->journal.list, &c->journal_iters);
}
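
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * walking a btree node with journal keys overlaid, as btree_gc does during
 * recovery:
 *
 *	struct btree_and_journal_iter iter;
 *	struct bkey_s_c k;
 *
 *	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
 *	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
 *		...process k here...
 *		bch2_btree_and_journal_iter_advance(&iter);
 *	}
 *	bch2_btree_and_journal_iter_exit(&iter);
 */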
/* sort and dedup all keys in the journal: */
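
/* Free the raw, unsorted journal entries filled in by the journal read path: */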
void bch2_journal_entries_free(struct bch_fs *c)
{
	struct journal_replay **i;
	struct genradix_iter iter;

	genradix_for_each(&c->journal_entries, iter, i)
		if (*i)
			kvpfree(*i, offsetof(struct journal_replay, j) +
				vstruct_bytes(&(*i)->j));
	genradix_free(&c->journal_entries);
}
/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return journal_key_cmp(l, r) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}
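
/*
 * Drop a reference on journal_keys: on the last put, free the key array, any
 * keys that were allocated at insert time, and the raw journal entries:
 */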
void bch2_journal_keys_put(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key *i;

	BUG_ON(atomic_read(&keys->ref) <= 0);

	if (!atomic_dec_and_test(&keys->ref))
		return;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = keys->gap = keys->size = 0;

	bch2_journal_entries_free(c);
}
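
/*
 * Sort keys, then compact away duplicates: since oldest compares first,
 * keeping the last of each run of equal keys keeps the newest version:
 */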
static void __journal_keys_sort(struct journal_keys *keys)
{
	struct journal_key *src, *dst;

	sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys->d;
	while (src < keys->d + keys->nr) {
		while (src + 1 < keys->d + keys->nr &&
		       !journal_key_cmp(src, src + 1))
			src++;

		*dst++ = *src++;
	}

	keys->nr = dst - keys->d;
}
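
/*
 * Two passes: count keys to size the array, then fill it. If the initial
 * allocation fails, fall back to a smaller buffer and compact (sort + dedup)
 * incrementally as we fill it:
 */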
int bch2_journal_keys_sort(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct journal_replay *i, **_i;
	struct jset_entry *entry;
	struct bkey_i *k;
	struct journal_keys *keys = &c->journal_keys;
	size_t nr_keys = 0, nr_read = 0;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		for_each_jset_key(k, entry, &i->j)
			nr_keys++;
	}

	if (!nr_keys)
		return 0;

	keys->size = roundup_pow_of_two(nr_keys);

	keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
	if (!keys->d) {
		bch_err(c, "Failed to allocate buffer for sorted journal keys (%zu keys); trying slowpath",
			nr_keys);

		do {
			keys->size >>= 1;
			keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
		} while (!keys->d && keys->size > nr_keys / 8);

		if (!keys->d) {
			bch_err(c, "Failed to allocate %zu size buffer for sorted journal keys; exiting",
				keys->size);
			return -BCH_ERR_ENOMEM_journal_keys_sort;
		}
	}

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		cond_resched();

		for_each_jset_key(k, entry, &i->j) {
			if (keys->nr == keys->size) {
				__journal_keys_sort(keys);

				if (keys->nr > keys->size * 7 / 8) {
					bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu/%zu",
						keys->nr, keys->size, nr_read, nr_keys);
					return -BCH_ERR_ENOMEM_journal_keys_sort;
				}
			}

			keys->d[keys->nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq),
				.journal_offset	= k->_data - i->j._data,
			};

			nr_read++;
		}
	}

	__journal_keys_sort(keys);
	keys->gap = keys->nr;

	bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_keys, keys->nr);
	return 0;
}