/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

#define BTREE_UPDATE_NODES_MAX		((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)

#define BTREE_UPDATE_JOURNAL_RES	(BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
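
/*
 * Worked example (illustrative, not load-bearing - the real values come from
 * the macros above): with BTREE_MAX_DEPTH == 4 and GC_MERGE_NODES == 4 as
 * currently defined elsewhere in bcachefs, BTREE_UPDATE_NODES_MAX comes to
 * (4 - 2) * 2 + 4 = 8 nodes, and BTREE_UPDATE_JOURNAL_RES reserves room for
 * one maximum-size btree pointer key, plus one u64 of overhead, per node.
 */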

/*
 * Tracks an in-progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new node visible completes -
 * until then, the old nodes are still reachable on disk.
 *
 * (A rough walk-through of this sequence follows the struct definition below.)
 */
struct btree_update {
	struct closure			cl;
	struct bch_fs			*c;
	u64				start_time;

	struct list_head		list;
	struct list_head		unwritten_list;

	/* What kind of update are we doing? */
	enum {
		BTREE_INTERIOR_NO_UPDATE,
		BTREE_INTERIOR_UPDATING_NODE,
		BTREE_INTERIOR_UPDATING_ROOT,
		BTREE_INTERIOR_UPDATING_AS,
	} mode;

	unsigned			nodes_written:1;
	unsigned			took_gc_lock:1;

	enum btree_id			btree_id;
	unsigned			update_level;

	struct disk_reservation		disk_res;

	/*
	 * BTREE_INTERIOR_UPDATING_NODE:
	 * The update that made the new nodes visible was a regular update to an
	 * existing interior node - @b. We can't write out the update to @b
	 * until the new nodes we created are finished writing, so we block @b
	 * from writing by putting this btree_interior update on the
	 * @b->write_blocked list with @write_blocked_list:
	 */
	struct btree			*b;
	struct list_head		write_blocked_list;

	/*
	 * We may be freeing nodes that were dirty, and thus had journal entries
	 * pinned: we need to transfer the oldest of those pins to the
	 * btree_update operation, and release it when the new node(s)
	 * are all persistent and reachable:
	 */
	struct journal_entry_pin	journal;

	/* Preallocated nodes we reserve when we start the update: */
	struct prealloc_nodes {
		struct btree		*b[BTREE_UPDATE_NODES_MAX];
		unsigned		nr;
	}				prealloc_nodes[2];

	/* Nodes being freed: */
	struct keylist			old_keys;
	u64				_old_keys[BTREE_UPDATE_NODES_MAX *
						  BKEY_BTREE_PTR_U64s_MAX];

	/* Nodes being added: */
	struct keylist			new_keys;
	u64				_new_keys[BTREE_UPDATE_NODES_MAX *
						  BKEY_BTREE_PTR_U64s_MAX];

	/* New nodes, that will be made reachable by this update: */
	struct btree			*new_nodes[BTREE_UPDATE_NODES_MAX];
	unsigned			nr_new_nodes;

	struct btree			*old_nodes[BTREE_UPDATE_NODES_MAX];
	__le64				old_nodes_seq[BTREE_UPDATE_NODES_MAX];
	unsigned			nr_old_nodes;

	open_bucket_idx_t		open_buckets[BTREE_UPDATE_NODES_MAX *
						     BCH_REPLICAS_MAX];
	open_bucket_idx_t		nr_open_buckets;

	unsigned			journal_u64s;
	u64				journal_entries[BTREE_UPDATE_JOURNAL_RES];

	/* Only here to reduce stack usage on recursive splits: */
	struct keylist			parent_keys;
	/*
	 * Enough room for btree_split's keys without realloc - btree node
	 * pointers never have crc/compression info, so we only need to account
	 * for the pointers for three keys
	 */
	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};
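
/*
 * Rough walk-through of the lifecycle described in the comment above (the
 * named entry points live in btree_update_interior.c; argument lists are
 * omitted and approximate, so treat this as a sketch, not a literal call
 * sequence):
 *
 *	as = bch2_btree_update_start(...);	reserve nodes, journal space, etc.
 *	... allocate replacement node(s), fill them in memory ...
 *	... update the parent node (or the root) in memory ...
 *	bch2_btree_update_done(as, ...);	writes kick off; the interior/root
 *						update that makes the new nodes
 *						reachable is held back until the
 *						new nodes' own writes complete,
 *						and only then can the old nodes'
 *						disk space be reclaimed
 */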

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
						  struct btree_trans *,
						  struct btree *,
						  struct bkey_format);

int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned);

int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t,
				  unsigned, unsigned, enum btree_node_sibling);

static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
					btree_path_idx_t path_idx,
					unsigned level, unsigned flags,
					enum btree_node_sibling sib)
{
	struct btree_path *path = trans->paths + path_idx;
	struct btree *b;

	EBUG_ON(!btree_node_locked(path, level));

	b = path->l[level].b;
	if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
		return 0;

	return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib);
}

static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
					      btree_path_idx_t path,
					      unsigned level,
					      unsigned flags)
{
	return  bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						    btree_prev_sib) ?:
		bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						    btree_next_sib);
}
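
/*
 * Example use (sketch, illustrative variable names): after an update that may
 * have shrunk a leaf, a caller can do
 *
 *	ret = bch2_foreground_maybe_merge(trans, path_idx, level, flags);
 *
 * which tries merging with the previous sibling and then the next one; both
 * calls are cheap no-ops while the node is still above
 * btree_foreground_merge_threshold.
 */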

int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
			    struct btree *, unsigned);
void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
			       struct btree *, struct bkey_i *,
			       unsigned, bool);
int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
					struct bkey_i *, unsigned, bool);

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);

static inline unsigned btree_update_reserve_required(struct bch_fs *c,
						     struct btree *b)
{
	unsigned depth = btree_node_root(c, b)->c.level + 1;

	/*
	 * Number of nodes we might have to allocate in a worst case btree
	 * split operation - we split all the way up to the root, then allocate
	 * a new root, unless we're already at max depth:
	 */
	if (depth < BTREE_MAX_DEPTH)
		return (depth - b->c.level) * 2 + 1;
	else
		return (depth - b->c.level) * 2 - 1;
}
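
/*
 * Worked example: if the root is at level 2 and @b is a leaf (level 0), depth
 * is 3, which is below BTREE_MAX_DEPTH, so we reserve (3 - 0) * 2 + 1 = 7
 * nodes: two per level for splits all the way up, plus one for a new root. At
 * maximum depth no new root can be added, hence the "* 2 - 1" case.
 */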

static inline void btree_node_reset_sib_u64s(struct btree *b)
{
	b->sib_u64s[0] = b->nr.live_u64s;
	b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *btree_data_end(struct btree *b)
{
	return (void *) b->data + btree_buf_bytes(b);
}

static inline struct bkey_packed *unwritten_whiteouts_start(struct btree *b)
{
	return (void *) ((u64 *) btree_data_end(b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct btree *b)
{
	return btree_data_end(b);
}

static inline void *write_block(struct btree *b)
{
	return (void *) b->data + (b->written << 9);
}

static inline bool __btree_addr_written(struct btree *b, void *p)
{
	return p < write_block(b);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
	return __btree_addr_written(b, i);
}

static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
	return __btree_addr_written(b, k);
}
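
/*
 * Note on the helpers above: b->written is a count of 512-byte sectors, hence
 * the << 9 in write_block(). Addresses below write_block() belong to bsets
 * that have already been written to disk and must not be modified in place;
 * anything at or past it is still only dirty in memory.
 */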

static inline ssize_t __bch2_btree_u64s_remaining(struct btree *b, void *end)
{
	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
		b->whiteout_u64s;
	ssize_t total = btree_buf_bytes(b) >> 3;

	/* Always leave one extra u64 for bch2_varint_decode: */
	used++;

	return total - used;
}

static inline size_t bch2_btree_keys_u64s_remaining(struct btree *b)
{
	ssize_t remaining = __bch2_btree_u64s_remaining(b,
				btree_bkey_last(b, bset_tree_last(b)));

	BUG_ON(remaining < 0);

	if (bset_written(b, btree_bset_last(b)))
		return 0;

	return remaining;
}

#define BTREE_WRITE_SET_U64s_BITS	9

static inline unsigned btree_write_set_buffer(struct btree *b)
{
	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
	return 8 << BTREE_WRITE_SET_U64s_BITS;
}

static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct btree *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct btree_node_entry *bne = max(write_block(b),
			(void *) btree_bkey_last(b, bset_tree_last(b)));
	ssize_t remaining_space =
		__bch2_btree_u64s_remaining(b, bne->keys.start);

	if (unlikely(bset_written(b, bset(b, t)))) {
		if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
			return bne;
	} else {
		if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
		    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
			return bne;
	}

	return NULL;
}
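
/*
 * In other words: start a new bset either when (a) the last bset has already
 * been written out and at least one block's worth of space remains, or (b) the
 * currently open bset has grown past btree_write_set_buffer() bytes and
 * there's room for another buffer's worth - keeping the bset we're inserting
 * into, and the cost of each insertion, small.
 */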

static inline void push_whiteout(struct btree *b, struct bpos pos)
{
	struct bkey_packed k;

	BUG_ON(bch2_btree_keys_u64s_remaining(b) < BKEY_U64s);
	EBUG_ON(btree_node_just_written(b));

	if (!bkey_pack_pos(&k, pos, b)) {
		struct bkey *u = (void *) &k;

		bkey_init(u);
		u->p = pos;
	}

	k.needs_whiteout = true;

	b->whiteout_u64s += k.u64s;
	bkey_p_copy(unwritten_whiteouts_start(b), &k);
}
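
/*
 * Sketch of the typical trigger (illustrative names, not the literal
 * insert-path code): deleting a key that lives in an already-written bset
 * can't clear it in place, so the deletion is recorded as a whiteout packed
 * into the unwritten region at the end of the node:
 *
 *	if (bkey_written(b, k))
 *		push_whiteout(b, insert->k.p);
 */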

/*
 * The write lock must be held on @b (otherwise the dirty bset that we were
 * going to insert into could be written out from under us):
 */
static inline bool bch2_btree_node_insert_fits(struct btree *b, unsigned u64s)
{
	if (unlikely(btree_node_need_rewrite(b)))
		return false;

	return u64s <= bch2_btree_keys_u64s_remaining(b);
}
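
/*
 * Typical caller (sketch, with illustrative names - u64s is the total size of
 * the keys about to be inserted):
 *
 *	if (!bch2_btree_node_insert_fits(b, u64s))
 *		return -BCH_ERR_btree_insert_btree_node_full;
 *
 * The commit path then responds to that error by splitting the leaf (see
 * bch2_btree_split_leaf() above) and restarting the transaction.
 */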

void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);

bool bch2_btree_interior_updates_flush(struct bch_fs *);

void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
					struct jset_entry *, unsigned long);

void bch2_do_pending_node_rewrites(struct bch_fs *);
void bch2_free_pending_node_rewrites(struct bch_fs *);

void bch2_fs_btree_interior_update_exit(struct bch_fs *);
void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
int bch2_fs_btree_interior_update_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */