/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"
#include "bkey_types.h"

extern char * const bch2_member_error_strs[];

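/*
 * Members in a v2 superblock section are stored as a flat array with a
 * per-entry stride of member_bytes, so entries can grow without breaking
 * older layouts; index by the stride rather than sizeof(struct bch_member):
 */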
static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}

int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

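/*
 * Try to take an io ref for IO in the given direction: fails if the device
 * is offline, or if its member state doesn't allow it (failed devices take
 * no IO at all, ro devices only take reads). A minimal usage sketch:
 *
 *	if (bch2_dev_get_ioref(ca, WRITE)) {
 *		... submit IO ...
 *		percpu_ref_put(&ca->io_ref);
 *	}
 */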
static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
{
	if (!percpu_ref_tryget(&ca->io_ref))
		return false;

	if (ca->mi.state == BCH_MEMBER_STATE_rw ||
	    (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
		return true;

	percpu_ref_put(&ca->io_ref);
	return false;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

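/*
 * struct bch_devs_list is a small fixed-size array of device indices (e.g.
 * the devices an extent has pointers to); these helpers treat it as a tiny
 * set:
 */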
static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}

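/*
 * Walk c->devs[] from @idx, optionally restricted to @mask, returning the
 * next device that's present; caller must hold rcu_read_lock() or
 * state_lock:
 */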
static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)

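/*
 * Iteration that takes a long-lived ref on each device (ca->ref) so the
 * caller needn't stay inside an RCU read-side section between devices:
 */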
static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	rcu_read_lock();
	if (ca)
		percpu_ref_put(&ca->ref);

	if ((ca = __bch2_next_dev(c, ca, NULL)))
		percpu_ref_get(&ca->ref);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (;	(_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)

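/*
 * A minimal usage sketch, where dev_is_interesting() stands in for any
 * hypothetical caller-supplied predicate; on an early break, the ref taken
 * by bch2_get_next_dev() must be dropped by hand:
 *
 *	for_each_member_device(c, ca)
 *		if (dev_is_interesting(ca)) {
 *			percpu_ref_put(&ca->ref);
 *			break;
 *		}
 */

/*
 * Like bch2_get_next_dev(), but skips devices whose member state isn't in
 * @state_mask (a bitmask of BCH_MEMBER_STATE_* bits), and takes io_ref
 * rather than ref:
 */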
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask)
{
	rcu_read_lock();
	if (ca)
		percpu_ref_put(&ca->io_ref);

	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(_c, _ca, state_mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

#define for_each_readable_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))

/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_check(c->devs[idx], 1);
}

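/* Caller must hold sb_lock or state_lock, per the lockdep assertion: */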
static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_protected(c->devs[idx],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

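/* A member slot is in use iff its uuid is nonzero: */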
static inline bool bch2_member_exists(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_exists(&m);
	}
	return false;
}

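/*
 * Unpack an on-disk member into host endianness. Note the durability
 * encoding: 0 on disk means "unset, default to 1"; nonzero values are
 * stored as durability + 1:
 */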
static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets	= le64_to_cpu(mi->nbuckets),
		.first_bucket	= le16_to_cpu(mi->first_bucket),
		.bucket_size	= le16_to_cpu(mi->bucket_size),
		.group		= BCH_MEMBER_GROUP(mi),
		.state		= BCH_MEMBER_STATE(mi),
		.discard	= BCH_MEMBER_DISCARD(mi),
		.data_allowed	= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability	= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= bch2_member_exists(mi),
		.btree_bitmap_shift	= mi->btree_bitmap_shift,
		.btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

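/*
 * btree_allocated_bitmap divides the device into 64 equal ranges of
 * (1 << btree_bitmap_shift) sectors each; returns true iff every range
 * overlapping [start, start + sectors) is marked as possibly containing
 * btree nodes:
 */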
static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
{
	u64 end = start + sectors;

	if (end > 64ULL << ca->mi.btree_bitmap_shift)
		return false;

	for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
	     (u64) bit << ca->mi.btree_bitmap_shift < end;
	     bit++)
		if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
			return false;
	return true;
}

bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);

#endif /* _BCACHEFS_SB_MEMBERS_H */