/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bbpos_types.h"
#include "btree_key_cache_types.h"
#include "buckets_types.h"
#include "journal_types.h"
#include "replicas_types.h"
struct btree_nr_keys {
        /*
         * Amount of live metadata (i.e. size of node after a compaction) in
         * units of u64s:
         */
        u16             live_u64s;
        u16             bset_u64s[MAX_BSETS];
};
struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
         * started at 1, so that things line up on the same cachelines
         * better: see comments in bset.c at cacheline_to_bkey() for details
         */

        /* size of the binary tree and prev array */
        u16             size;

        /* function of size - precalculated for to_inorder() */
        u16             extra;

        u16             data_offset;
        u16             end_offset;
};
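/*
 * Illustrative sketch (hypothetical helper, not part of the real interface):
 * with the tree laid out in an array rooted at index 1, the children of node
 * j live at 2*j and 2*j + 1, so nodes near the root pack onto the same
 * cachelines:
 */
static inline unsigned __eytzinger1_child_example(unsigned j, unsigned right)
{
        /* left child at 2*j, right child at 2*j + 1 */
        return (j << 1) + (right & 1);
}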
struct btree_write {
        struct journal_entry_pin journal;
};

struct btree_alloc {
        struct open_buckets     ob;
        __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
struct btree_bkey_cached_common {
        struct six_lock         lock;
        u8                      level;
        u8                      btree_id;
        bool                    cached;
};

struct btree {
        struct btree_bkey_cached_common c;

        struct rhash_head       hash;

        unsigned long           flags;
        u8                      nsets;
        struct bkey_format      format;

        struct btree_node       *data;

        /*
         * Sets of sorted keys - the real btree node - plus a binary search tree
         *
         * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
         * to the memory we have allocated for this btree node. Additionally,
         * set[0]->data points to the entire btree node as it exists on disk.
         */
        struct bset_tree        set[MAX_BSETS];

        struct btree_nr_keys    nr;

        struct btree_write      writes[2];

        /* Key/pointer for this btree node */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        /*
         * XXX: add a delete sequence number, so when bch2_btree_node_relock()
         * fails because the lock sequence number has changed - i.e. the
         * contents were modified - we can still relock the node if it's still
         * the one we want, without redoing the traversal
         */
        /*
         * For asynchronous splits/interior node updates:
         * When we do a split, we allocate new child nodes and update the parent
         * node to point to them: we update the parent in memory immediately,
         * but then we must wait until the children have been written out before
         * the update to the parent can be written - this is a list of the
         * btree_updates that are blocking this node from being written:
         */
        struct list_head        write_blocked;
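        /*
         * Illustrative ordering sketch (an assumption, not the actual write
         * path): before writing this node out, a writer would check
         *
         *      if (!list_empty(&b->write_blocked))
         *              ... defer the write until those updates complete ...
         *
         * so the parent never hits disk pointing at unwritten children.
         */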
        /*
         * Also for asynchronous splits/interior node updates:
         * If a btree node isn't reachable yet, we don't want to kick off
         * another write - because that write also won't yet be reachable and
         * marking it as completed before it's reachable would be incorrect:
         */
        unsigned long           will_make_reachable;

        struct open_buckets     ob;
        /* lru list */
        struct list_head        list;
};

struct btree_cache {
        struct rhashtable       table;
        bool                    table_init_done;
        /*
         * We never free a struct btree, except on shutdown - we just put it on
         * the btree_cache_freed list and reuse it later. This simplifies the
         * code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable effectively is a small cache - we use it because
         * high order page allocations can be rather expensive, and it's quite
         * common to delete and allocate btree nodes in quick succession. It
         * should never grow past ~2-3 nodes in practice.
         */
        struct list_head        live;
        struct list_head        freeable;
        struct list_head        freed_pcpu;
        struct list_head        freed_nonpcpu;
        /* Number of elements in live + freeable lists */
        unsigned                used;

        struct shrinker         *shrink;

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
         * to satisfy the allocation - lock to guarantee only one thread does
         * this at a time:
         */
        struct task_struct      *alloc_lock;
        struct closure_waitlist alloc_wait;

        struct bbpos            pinned_nodes_start;
        struct bbpos            pinned_nodes_end;
        u64                     pinned_nodes_leaf_mask;
        u64                     pinned_nodes_interior_mask;
};
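/*
 * A minimal sketch (hypothetical helper, not the real interface) of how the
 * pinned-node masks above could be consumed: one bit per btree, with the
 * leaf/interior distinction selecting which mask applies:
 */
static inline bool __btree_node_pinned_example(struct btree_cache *bc,
                                               enum btree_id btree, unsigned level)
{
        u64 mask = level
                ? bc->pinned_nodes_interior_mask
                : bc->pinned_nodes_leaf_mask;

        return (mask & BIT_ULL(btree)) != 0;
}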
struct btree_node_iter {
        struct btree_node_iter_set {
                u16     k, end;
        } data[MAX_BSETS];
};
/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
static const __maybe_unused u16 BTREE_ITER_SLOTS                = 1 << 0;
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
static const __maybe_unused u16 BTREE_ITER_INTENT               = 1 << 1;
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
static const __maybe_unused u16 BTREE_ITER_PREFETCH             = 1 << 2;
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS           = 1 << 3;
static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS          = 1 << 4;
static const __maybe_unused u16 BTREE_ITER_CACHED               = 1 << 5;
static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE       = 1 << 6;
static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES         = 1 << 7;
static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL         = 1 << 8;
static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS      = 1 << 9;
static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS        = 1 << 10;
static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS     = 1 << 11;
static const __maybe_unused u16 BTREE_ITER_NOPRESERVE           = 1 << 12;
static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL        = 1 << 13;
static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL       = 1 << 14;
#define __BTREE_ITER_FLAGS_END          15
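/*
 * Usage sketch (illustrative; the iterator constructors live in btree_iter.h):
 * these flags are OR'd together when initializing an iterator, e.g. a caller
 * that expects to update extents might pass
 *
 *      BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_IS_EXTENTS
 *
 * to bch2_trans_iter_init().
 */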
enum btree_path_uptodate {
        BTREE_ITER_UPTODATE             = 0,
        BTREE_ITER_NEED_RELOCK          = 1,
        BTREE_ITER_NEED_TRAVERSE        = 2,
};
#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
#define TRACK_PATH_ALLOCATED
#endif
typedef u16 btree_path_idx_t;

struct btree_path {
        btree_path_idx_t        sorted_idx;

        /* btree_iter_copy starts here: */
        enum btree_id           btree_id:5;
        enum btree_path_uptodate uptodate:2;
        /*
         * When true, failing to relock this path will cause the transaction to
         * restart:
         */
        bool                    should_be_locked:1;
        unsigned                level:3,
                                locks_want:3;

        struct btree_path_level {
                struct btree    *b;
                struct btree_node_iter iter;
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
                u64             lock_taken_time;
#endif
        } l[BTREE_MAX_DEPTH];
#ifdef TRACK_PATH_ALLOCATED
        unsigned long           ip_allocated;
#endif
};
static inline struct btree_path_level *path_l(struct btree_path *path)
{
        return path->l + path->level;
}

static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
{
#ifdef TRACK_PATH_ALLOCATED
        return path->ip_allocated;
#else
        return _THIS_IP_;
#endif
}
/*
 * @pos                 - iterator's current position
 * @level               - current btree depth
 * @locks_want          - btree level below which we start taking intent locks
 * @nodes_locked        - bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked - bitmask indicating which locks are intent locks
 */
struct btree_iter {
        struct btree_trans      *trans;
        btree_path_idx_t        path;
        btree_path_idx_t        update_path;
        btree_path_idx_t        key_cache_path;

        enum btree_id           btree_id:8;

        /* btree_iter_copy starts here: */
        u16                     flags;

        /* When we're filtering by snapshot, the snapshot ID we're looking for: */
        unsigned                snapshot;

        struct bpos             pos;
        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;

        /* BTREE_ITER_WITH_JOURNAL: */
        size_t                  journal_idx;
#ifdef TRACK_PATH_ALLOCATED
        unsigned long           ip_allocated;
#endif
};
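/*
 * Lifecycle sketch (simplified; the real helpers live in btree_iter.h):
 *
 *      struct btree_iter iter;
 *      struct bkey_s_c k;
 *
 *      bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *      k = bch2_btree_iter_peek(&iter);
 *      if (!bkey_err(k))
 *              ... use k ...
 *      bch2_trans_iter_exit(trans, &iter);
 */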
#define BKEY_CACHED_ACCESSED            0
#define BKEY_CACHED_DIRTY               1

struct bkey_cached {
        struct btree_bkey_cached_common c;

        unsigned long           flags;
        unsigned long           btree_trans_barrier_seq;
        struct bkey_cached_key  key;

        struct rhash_head       hash;
        struct list_head        list;

        struct journal_entry_pin journal;
};
static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
        return !b->cached
                ? container_of(b, struct btree, c)->key.k.p
                : container_of(b, struct bkey_cached, c)->key.pos;
}
struct btree_insert_entry {
        enum btree_id           btree_id:8;
        bool                    insert_trigger_run:1;
        bool                    overwrite_trigger_run:1;
        bool                    key_cache_already_flushed:1;
        /*
         * @old_k may be a key from the journal; @old_btree_u64s always refers
         * to the size of the key being overwritten in the btree:
         */
        u8                      old_btree_u64s;
        btree_path_idx_t        path;

        /* key being overwritten: */
        struct bkey             old_k;
        const struct bch_val    *old_v;
        unsigned long           ip_allocated;
};
/* Number of btree paths we preallocate, usually enough */
#define BTREE_ITER_INITIAL              64

/*
 * Limit for btree_trans_too_many_iters(); this is enough that almost all code
 * paths should run inside this limit, and if they don't it usually indicates a
 * bug (leaking/duplicated btree paths).
 *
 * exception: some fsck paths
 *
 * bugs with excessive path usage seem to have possibly been eliminated now, so
 * we might consider eliminating this (and btree_trans_too_many_iters()) at
 * some point.
 */
#define BTREE_ITER_NORMAL_LIMIT         256

/* never exceed limit */
#define BTREE_ITER_MAX                  (1U << 10)
struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
        btree_trans_commit_hook_fn      *fn;
        struct btree_trans_commit_hook  *next;
};
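/*
 * Usage sketch (hypothetical hook; registration happens via
 * bch2_trans_commit_hook() in btree_update.h): hooks are typically embedded
 * in a larger struct and recovered with container_of() in the callback:
 *
 *      struct my_hook {
 *              struct btree_trans_commit_hook  h;
 *              u64                             arg;
 *      };
 *
 *      static int my_hook_fn(struct btree_trans *trans,
 *                            struct btree_trans_commit_hook *h)
 *      {
 *              struct my_hook *m = container_of(h, struct my_hook, h);
 *
 *              ... use m->arg ...
 *              return 0;
 *      }
 */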
#define BTREE_TRANS_MEM_MAX             (1U << 16)

#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS 10000

struct btree_trans_paths {
        unsigned long           nr_paths;
        struct btree_path       paths[];
};
struct btree_trans {
        unsigned long           *paths_allocated;
        struct btree_path       *paths;
        btree_path_idx_t        *sorted;
        struct btree_insert_entry *updates;

        btree_path_idx_t        nr_sorted;
        btree_path_idx_t        nr_paths;
        btree_path_idx_t        nr_paths_max;

        bool                    lock_may_not_fail:1;
        bool                    in_traverse_all:1;
        bool                    memory_allocation_failure:1;
        bool                    journal_transaction_names:1;
        bool                    journal_replay_not_finished:1;
        bool                    notrace_relock_fail:1;
        enum bch_errcode        restarted:16;

        unsigned long           last_begin_ip;
        unsigned long           last_restarted_ip;
        unsigned long           srcu_lock_time;

        struct btree_bkey_cached_common *locking;
        struct six_lock_waiter  locking_wait;

        u16                     journal_entries_u64s;
        u16                     journal_entries_size;
        struct jset_entry       *journal_entries;

        struct btree_trans_commit_hook *hooks;
        struct journal_entry_pin *journal_pin;

        struct journal_res      journal_res;
        struct disk_reservation *disk_res;

        struct bch_fs_usage_base fs_usage_delta;

        unsigned                journal_u64s;
        unsigned                extra_disk_res; /* XXX kill */
        struct replicas_delta_list *fs_usage_deltas;

        /* Entries before this are zeroed out on every bch2_trans_get() call */

        struct list_head        list;

        unsigned long           _paths_allocated[BITS_TO_LONGS(BTREE_ITER_INITIAL)];
        struct btree_trans_paths trans_paths;
        struct btree_path       _paths[BTREE_ITER_INITIAL];
        btree_path_idx_t        _sorted[BTREE_ITER_INITIAL + 4];
        struct btree_insert_entry _updates[BTREE_ITER_INITIAL];
};
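/*
 * Lifecycle sketch (simplified; the real helpers live in btree_iter.h and
 * btree_update.h): transactions are pooled and reused, with everything before
 * the marker above re-zeroed on each bch2_trans_get(). some_update_fn here is
 * a hypothetical callback that performs the actual update:
 *
 *      struct btree_trans *trans = bch2_trans_get(c);
 *
 *      ret = commit_do(trans, NULL, NULL, 0, some_update_fn(trans));
 *      bch2_trans_put(trans);
 */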
static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
{
        return trans->paths + iter->path;
}

static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
{
        return iter->key_cache_path
                ? trans->paths + iter->key_cache_path
                : NULL;
}
#define BCH_BTREE_WRITE_TYPES()                                 \
        x(initial,              0)                              \
        x(init_next_bset,       1)                              \
        x(cache_reclaim,        2)                              \
        x(journal_reclaim,      3)                              \
        x(interior,             4)

enum btree_write_type {
#define x(t, n) BTREE_WRITE_##t,
        BCH_BTREE_WRITE_TYPES()
#undef x
        BTREE_WRITE_TYPE_NR
};

#define BTREE_WRITE_TYPE_MASK   (roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
#define BTREE_WRITE_TYPE_BITS   ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
#define BTREE_FLAGS()                                           \
        x(write_blocked)                                        \
        x(will_make_reachable)                                  \
        x(write_idx)                                            \
        x(write_in_flight)                                      \
        x(write_in_flight_inner)

enum btree_flags {
        /* First bits for btree node write type */
        BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
#define x(flag) BTREE_NODE_##flag,
        BTREE_FLAGS()
#undef x
};
#define x(flag)                                                 \
static inline bool btree_node_ ## flag(struct btree *b)         \
{       return test_bit(BTREE_NODE_ ## flag, &b->flags); }      \
                                                                \
static inline void set_btree_node_ ## flag(struct btree *b)     \
{       set_bit(BTREE_NODE_ ## flag, &b->flags); }              \
                                                                \
static inline void clear_btree_node_ ## flag(struct btree *b)   \
{       clear_bit(BTREE_NODE_ ## flag, &b->flags); }

BTREE_FLAGS()
#undef x
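/*
 * For each entry in BTREE_FLAGS() the x-macro above expands to a
 * test/set/clear triple, e.g. btree_node_write_blocked(),
 * set_btree_node_write_blocked() and clear_btree_node_write_blocked(),
 * all operating on the b->flags bitmap (set/clear are atomic bitops).
 */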
static inline struct btree_write *btree_current_write(struct btree *b)
{
        return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
        return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
        return b->set + b->nsets - 1;
}
static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
        return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
        u16 ret = (u64 *) p - 1 - (u64 *) b->data;

        EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
        return ret;
}
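/*
 * Note (derived from the helpers above): offsets are in units of u64s,
 * measured from one u64 past the start of b->data, so for any in-node
 * pointer p the round trip holds:
 *
 *      p == __btree_node_offset_to_ptr(b, __btree_node_ptr_to_offset(b, p))
 */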
static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
{
        return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
        t->end_offset =
                __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
                                  const struct bset *i)
{
        t->data_offset = __btree_node_ptr_to_offset(b, i);
        set_btree_bset_end(b, t);
}
static inline struct bset *btree_bset_first(struct btree *b)
{
        return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
        return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
        return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
        return __btree_node_offset_to_ptr(b, k);
}
static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
        return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)                                        \
({                                                                      \
        EBUG_ON(bset(_b, _t)->start !=                                  \
                __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
                                                                        \
        bset(_b, _t)->start;                                            \
})

#define btree_bkey_last(_b, _t)                                         \
({                                                                      \
        EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=     \
                vstruct_last(bset(_b, _t)));                            \
                                                                        \
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
})
static inline unsigned bset_u64s(struct bset_tree *t)
{
        return t->end_offset - t->data_offset -
                sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
        return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
        return i - (void *) b->data;
}
enum btree_node_type {
        BKEY_TYPE_btree,
#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
        BCH_BTREE_IDS()
#undef x
        BKEY_TYPE_NR
};
/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
        return level ? BKEY_TYPE_btree : (unsigned) id + 1;
}

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
        return __btree_node_type(b->c.level, b->c.btree_id);
}
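/*
 * Example (follows directly from __btree_node_type()): leaf keys in
 * BTREE_ID_extents have type BKEY_TYPE_extents == BTREE_ID_extents + 1,
 * while keys in interior nodes are always BKEY_TYPE_btree, whichever
 * btree they belong to.
 */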
const char *bch2_btree_node_type_str(enum btree_node_type);

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS              \
        (BIT_ULL(BKEY_TYPE_extents)|                    \
         BIT_ULL(BKEY_TYPE_alloc)|                      \
         BIT_ULL(BKEY_TYPE_inodes)|                     \
         BIT_ULL(BKEY_TYPE_stripes)|                    \
         BIT_ULL(BKEY_TYPE_reflink)|                    \
         BIT_ULL(BKEY_TYPE_subvolumes)|                 \
         BIT_ULL(BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS             \
        (BIT_ULL(BKEY_TYPE_alloc)|                      \
         BIT_ULL(BKEY_TYPE_inodes)|                     \
         BIT_ULL(BKEY_TYPE_stripes)|                    \
         BIT_ULL(BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS                    \
        (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|            \
         BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS)
static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
        return BTREE_NODE_TYPE_HAS_TRIGGERS & BIT_ULL(type);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << type) & mask;
}

static inline bool btree_id_is_extents(enum btree_id btree)
{
        return btree_node_type_is_extents(__btree_node_type(0, btree));
}

static inline bool btree_type_has_snapshots(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}

static inline bool btree_type_has_snapshot_field(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}

static inline bool btree_type_has_ptrs(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}
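/*
 * The "0 | ..." pattern above folds each btree's flags into a compile-time
 * constant bitmask, so e.g. btree_type_has_ptrs(BTREE_ID_extents) reduces to
 * testing bit BTREE_ID_extents of a mask built from the BTREE_ID_DATA flags
 * in BCH_BTREE_IDS().
 */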
struct btree_root {
        struct btree            *b;

        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
enum btree_gc_coalesce_fail_reason {
        BTREE_GC_COALESCE_FAIL_RESERVE_GET,
        BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
        BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
        btree_prev_sib,
        btree_next_sib,
};
struct get_locks_fail {
        unsigned        l;
        struct btree    *b;
};

#endif /* _BCACHEFS_BTREE_TYPES_H */