// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);

/* Some (buggy!) compilers don't allow memcmp to be passed as a pointer */
static int bch2_memcmp(const void *l, const void *r, size_t size)
{
	return memcmp(l, r, size);
}
/* Replicas tracking - in memory: */
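/*
 * A replicas entry describes one combination of devices that data is
 * replicated across: the data type, how many replicas must be present for
 * the data to be available (nr_required), and a list of device indexes.
 * Keeping devs[] sorted lets entries be compared with plain memcmp();
 * bch2_replicas_entry_to_text() below renders e.g. "user: 1/2 [0 1]" -
 * data type, nr_required/nr_devs, then the devices.
 */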
static void verify_replicas_entry(struct bch_replicas_entry_v1 *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	BUG_ON(e->data_type >= BCH_DATA_NR);
	BUG_ON(!e->nr_devs);
	BUG_ON(e->nr_required > 1 &&
	       e->nr_required >= e->nr_devs);

	/* devs[] must be sorted, with no duplicates: */
	for (i = 0; i + 1 < e->nr_devs; i++)
		BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}
void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, bch2_memcmp, NULL);
}
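/*
 * The in-memory replicas table is a flat array of fixed-size (entry_size)
 * slots kept in eytzinger0 order, so __replicas_entry_idx() can do a
 * cache-friendly binary search using memcmp() as the comparison function.
 */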
static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
					   struct bch_replicas_entry_v0 *e)
{
	bch2_prt_data_type(out, e->data_type);

	prt_printf(out, ": %u [", e->nr_devs);
	for (unsigned i = 0; i < e->nr_devs; i++)
		prt_printf(out, i ? " %u" : "%u", e->devs[i]);
	prt_printf(out, "]");
}
void bch2_replicas_entry_to_text(struct printbuf *out,
				 struct bch_replicas_entry_v1 *e)
{
	bch2_prt_data_type(out, e->data_type);

	prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
	for (unsigned i = 0; i < e->nr_devs; i++)
		prt_printf(out, i ? " %u" : "%u", e->devs[i]);
	prt_printf(out, "]");
}
int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
				 struct bch_sb *sb,
				 struct printbuf *err)
{
	if (!r->nr_devs) {
		prt_printf(err, "no devices in entry ");
		goto bad;
	}

	if (r->nr_required > 1 &&
	    r->nr_required >= r->nr_devs) {
		prt_printf(err, "bad nr_required in entry ");
		goto bad;
	}

	for (unsigned i = 0; i < r->nr_devs; i++)
		if (!bch2_dev_exists(sb, r->devs[i])) {
			prt_printf(err, "invalid device %u in entry ", r->devs[i]);
			goto bad;
		}

	return 0;
bad:
	bch2_replicas_entry_to_text(err, r);
	return -BCH_ERR_invalid_replicas_entry;
}
void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry_v1 *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}
static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry_v1 *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (!p.has_ec)
			r->devs[r->nr_devs++] = p.ptr.dev;
		else
			r->nr_required = 0;
	}
}
static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry_v1 *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required	= s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}
void bch2_bkey_to_replicas(struct bch_replicas_entry_v1 *e,
			   struct bkey_s_c k)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		e->data_type = BCH_DATA_btree;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		e->data_type = BCH_DATA_user;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_parity;
		stripe_to_replicas(k, e);
		break;
	}

	bch2_replicas_entry_sort(e);
}
void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *e,
			      enum bch_data_type data_type,
			      struct bch_devs_list devs)
{
	BUG_ON(!data_type ||
	       data_type == BCH_DATA_sb ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	darray_for_each(devs, i)
		e->devs[e->nr_devs++] = *i;

	bch2_replicas_entry_sort(e);
}
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_fs *c,
		       struct bch_replicas_cpu *old,
		       struct bch_replicas_entry_v1 *new_entry)
{
	unsigned i;
	struct bch_replicas_cpu new = {
		.nr		= old->nr + 1,
		.entry_size	= max_t(unsigned, old->entry_size,
					replicas_entry_bytes(new_entry)),
	};

	for (i = 0; i < new_entry->nr_devs; i++)
		BUG_ON(!bch2_dev_exists2(c, new_entry->devs[i]));

	BUG_ON(!new_entry->data_type);
	verify_replicas_entry(new_entry);

	new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
	if (!new.entries)
		return new;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
	return new;
}
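/*
 * Lookup in the eytzinger-ordered table: the search key may be shorter than
 * the table's slot size, so the entry_cmp shim below pins the memcmp length
 * to the search entry's size rather than r->entry_size.
 */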
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
				       struct bch_replicas_entry_v1 *search)
{
	int idx, entry_size = replicas_entry_bytes(search);

	if (unlikely(entry_size > r->entry_size))
		return -1;

	verify_replicas_entry(search);

#define entry_cmp(_l, _r, size)	memcmp(_l, _r, entry_size)
	idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
			      entry_cmp, search);
#undef entry_cmp

	return idx < r->nr ? idx : -1;
}
int bch2_replicas_entry_idx(struct bch_fs *c,
			    struct bch_replicas_entry_v1 *search)
{
	bch2_replicas_entry_sort(search);

	return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry_v1 *search)
{
	return __replicas_entry_idx(r, search) >= 0;
}
bool bch2_replicas_marked(struct bch_fs *c,
			  struct bch_replicas_entry_v1 *search)
{
	bool marked;

	if (!search->nr_devs)
		return true;

	verify_replicas_entry(search);

	percpu_down_read(&c->mark_lock);
	marked = __replicas_has_entry(&c->replicas, search) &&
		(likely((!c->replicas_gc.entries)) ||
		 __replicas_has_entry(&c->replicas_gc, search));
	percpu_up_read(&c->mark_lock);

	return marked;
}
static void __replicas_table_update(struct bch_fs_usage *dst,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage *src,
				    struct bch_replicas_cpu *src_r)
{
	int src_idx, dst_idx;

	*dst = *src;

	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
		if (!src->replicas[src_idx])
			continue;

		dst_idx = __replicas_entry_idx(dst_r,
				cpu_replicas_entry(src_r, src_idx));
		BUG_ON(dst_idx < 0);

		dst->replicas[dst_idx] = src->replicas[src_idx];
	}
}
static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
					 struct bch_replicas_cpu *dst_r,
					 struct bch_fs_usage __percpu *src_p,
					 struct bch_replicas_cpu *src_r)
{
	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
	struct bch_fs_usage *dst, *src = (void *)
		bch2_acc_percpu_u64s((u64 __percpu *) src_p, src_nr);

	preempt_disable();
	dst = this_cpu_ptr(dst_p);
	preempt_enable();

	__replicas_table_update(dst, dst_r, src, src_r);
}
/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
{
	struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
	struct bch_fs_usage_online *new_scratch = NULL;
	struct bch_fs_usage __percpu *new_gc = NULL;
	struct bch_fs_usage *new_base = NULL;
	unsigned i, bytes = sizeof(struct bch_fs_usage) +
		sizeof(u64) * new_r->nr;
	unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
		sizeof(u64) * new_r->nr;
	int ret = 0;

	memset(new_usage, 0, sizeof(new_usage));

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
					sizeof(u64), GFP_KERNEL)))
			goto err;

	if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
	    !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
	    (c->usage_gc &&
	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
		goto err;

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (c->usage[i])
			__replicas_table_update_pcpu(new_usage[i], new_r,
						     c->usage[i], &c->replicas);
	if (c->usage_base)
		__replicas_table_update(new_base, new_r,
					c->usage_base, &c->replicas);
	if (c->usage_gc)
		__replicas_table_update_pcpu(new_gc, new_r,
					     c->usage_gc, &c->replicas);

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		swap(c->usage[i], new_usage[i]);
	swap(c->usage_base,	new_base);
	swap(c->usage_scratch,	new_scratch);
	swap(c->usage_gc,	new_gc);
	swap(c->replicas,	*new_r);
out:
	free_percpu(new_gc);
	kfree(new_scratch);
	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		free_percpu(new_usage[i]);
	kfree(new_base);
	return ret;
err:
	bch_err(c, "error updating replicas table: memory allocation failure");
	ret = -BCH_ERR_ENOMEM_replicas_table;
	goto out;
}
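/*
 * Note: on success the new table and usage arrays are swapped into c, so the
 * new_* locals (and *new_r) end up holding the old allocations - the common
 * cleanup path at out: frees the previous versions on success and the unused
 * ones on error.
 */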
static unsigned reserve_journal_replicas(struct bch_fs *c,
					 struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry_v1 *e;
	unsigned journal_res_u64s = 0;

	/* nr_inodes: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* key_version: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* persistent_reserved: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
		BCH_REPLICAS_MAX;

	for_each_cpu_replicas_entry(r, e)
		journal_res_u64s +=
			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
				     e->nr_devs, sizeof(u64));

	return journal_res_u64s;
}
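/*
 * The journal reserves space in every write for usage entries (nr_inodes,
 * key_version, persistent_reserved, and one jset_entry_data_usage per
 * replicas entry), so the reservation has to be resized whenever the
 * replicas table grows - see the bch2_journal_entry_res_resize() calls
 * below and in bch2_fs_replicas_init().
 */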
noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry_v1 *new_entry)
{
	struct bch_replicas_cpu new_r, new_gc;
	int ret = 0;

	verify_replicas_entry(new_entry);

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(c, &c->replicas_gc, new_entry);
		if (!new_gc.entries) {
			ret = -BCH_ERR_ENOMEM_cpu_replicas;
			goto err;
		}
	}

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry);
		if (!new_r.entries) {
			ret = -BCH_ERR_ENOMEM_cpu_replicas;
			goto err;
		}

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
		if (ret)
			goto err;

		bch2_journal_entry_res_resize(&c->journal,
				&c->replicas_journal_res,
				reserve_journal_replicas(c, &new_r));
	}

	if (!new_r.entries &&
	    !new_gc.entries)
		goto out;

	/* allocations done, now commit: */

	if (new_r.entries)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);
	if (new_r.entries)
		ret = replicas_table_update(c, &new_r);
	if (new_gc.entries)
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);
out:
	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);

	return ret;
err:
	bch_err_msg(c, ret, "adding replicas entry");
	goto out;
}
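/*
 * Fast path/slow path split: bch2_replicas_marked() is a read-locked lookup,
 * and only a combination of devices we haven't seen before takes the
 * slowpath that updates the superblock and resizes the in-memory tables.
 */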
int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
{
	return likely(bch2_replicas_marked(c, r))
		? 0 : bch2_mark_replicas_slowpath(c, r);
}

/* replicas delta list: */
int bch2_replicas_delta_list_mark(struct bch_fs *c,
				  struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	int ret = 0;

	for (d = r->d; !ret && d != top; d = replicas_delta_next(d))
		ret = bch2_mark_replicas(c, &d->r);
	return ret;
}
/*
 * Old replicas_gc mechanism: only used for journal replicas entries now, should
 * die at some point:
 */
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	ret =   ret ?:
		bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc) ?:
		replicas_table_update(c, &c->replicas_gc);

	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}
int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_entry_v1 *e;
	unsigned i = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr		= 0;
	c->replicas_gc.entry_size	= 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.nr++;
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));
		}

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
					 GFP_KERNEL);
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);
		bch_err(c, "error allocating c->replicas_gc");
		return -BCH_ERR_ENOMEM_replicas_gc;
	}

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);

	return 0;
}
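/*
 * Usage, a sketch (caller holds replicas_gc_lock throughout):
 *
 *	bch2_replicas_gc_start(c, BIT(BCH_DATA_journal));
 *	... re-mark entries still in use with bch2_mark_replicas() ...
 *	ret = bch2_replicas_gc_end(c, ret);
 *
 * gc_start() copies every entry whose data type is *not* in typemask into
 * replicas_gc; marking while gc is running adds entries back to it, and
 * gc_end() then installs replicas_gc as the new table, dropping whatever
 * was never re-marked.
 */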
/*
 * New much simpler mechanism for clearing out unneeded replicas entries - drop
 * replicas entries that have 0 sectors used.
 *
 * However, we don't track sector counts for journal usage, so this doesn't drop
 * any BCH_DATA_journal entries; the old bch2_replicas_gc_(start|end) mechanism
 * is retained for that.
 */
int bch2_replicas_gc2(struct bch_fs *c)
{
	struct bch_replicas_cpu new = { 0 };
	unsigned i, nr;
	int ret = 0;

	bch2_journal_meta(&c->journal);
retry:
	nr		= READ_ONCE(c->replicas.nr);
	new.entry_size	= READ_ONCE(c->replicas.entry_size);
	new.entries	= kcalloc(nr, new.entry_size, GFP_KERNEL);
	if (!new.entries) {
		bch_err(c, "error allocating c->replicas_gc");
		return -BCH_ERR_ENOMEM_replicas_gc;
	}

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	if (nr			!= c->replicas.nr ||
	    new.entry_size	!= c->replicas.entry_size) {
		/* allocation raced with a table resize - retry: */
		percpu_up_write(&c->mark_lock);
		mutex_unlock(&c->sb_lock);
		kfree(new.entries);
		goto retry;
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry_v1 *e =
			cpu_replicas_entry(&c->replicas, i);

		if (e->data_type == BCH_DATA_journal ||
		    c->usage_base->replicas[i] ||
		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
		    percpu_u64_get(&c->usage[1]->replicas[i]) ||
		    percpu_u64_get(&c->usage[2]->replicas[i]) ||
		    percpu_u64_get(&c->usage[3]->replicas[i]))
			memcpy(cpu_replicas_entry(&new, new.nr++),
			       e, new.entry_size);
	}

	bch2_cpu_replicas_sort(&new);

	ret =	bch2_cpu_replicas_to_sb_replicas(c, &new) ?:
		replicas_table_update(c, &new);

	kfree(new.entries);

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}
int bch2_replicas_set_usage(struct bch_fs *c,
			    struct bch_replicas_entry_v1 *r,
			    u64 sectors)
{
	int ret, idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0) {
		struct bch_replicas_cpu n;

		n = cpu_replicas_add_entry(c, &c->replicas, r);
		if (!n.entries)
			return -BCH_ERR_ENOMEM_cpu_replicas;

		ret = replicas_table_update(c, &n);
		if (ret)
			return ret;

		kfree(n.entries);

		idx = bch2_replicas_entry_idx(c, r);
		BUG_ON(idx < 0);
	}

	c->usage_base->replicas[idx] = sectors;

	return 0;
}
/* Replicas tracking - superblock: */
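/*
 * On disk, replicas entries are variable length and byte-packed in the
 * superblock field; in memory they're padded out to a common entry_size so
 * the table can be indexed and compared with memcmp(). The v0 entry format
 * lacks the nr_required field, which is why the v0 conversion below widens
 * each entry and sets nr_required = 1.
 */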
static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v1 *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -BCH_ERR_ENOMEM_cpu_replicas;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}
static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	/* account for the nr_required field missing from v0 entries: */
	entry_size += sizeof(struct bch_replicas_entry_v1) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -BCH_ERR_ENOMEM_cpu_replicas;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry_v1 *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}
int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };
	int ret = 0;

	if ((sb_v1 = bch2_sb_field_get(c->disk_sb.sb, replicas)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_field_get(c->disk_sb.sb, replicas_v0)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
	if (ret)
		return ret;

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);
	ret = replicas_table_update(c, &new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

	return ret;
}
static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry_v1 *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -BCH_ERR_ENOSPC_sb_replicas;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas_v0);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry_v1 *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -BCH_ERR_ENOSPC_sb_replicas;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
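/*
 * Note the format selection above: the v1 field is only written when some
 * entry has nr_required != 1; otherwise the more compact v0 format (one byte
 * smaller per entry) is used, and whichever field gets written, the other
 * version is deleted from the superblock.
 */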
static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
				      struct bch_sb *sb,
				      struct printbuf *err)
{
	unsigned i;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      bch2_memcmp, NULL);

	for (i = 0; i < cpu_r->nr; i++) {
		struct bch_replicas_entry_v1 *e =
			cpu_replicas_entry(cpu_r, i);

		int ret = bch2_replicas_entry_validate(e, sb, err);
		if (ret)
			return ret;

		if (i + 1 < cpu_r->nr) {
			struct bch_replicas_entry_v1 *n =
				cpu_replicas_entry(cpu_r, i + 1);

			BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);

			if (!memcmp(e, n, cpu_r->entry_size)) {
				prt_printf(err, "duplicate replicas entry ");
				bch2_replicas_entry_to_text(err, e);
				return -BCH_ERR_invalid_sb_replicas;
			}
		}
	}

	return 0;
}
static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
				     struct printbuf *err)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_replicas_cpu cpu_r;
	int ret;

	ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
	if (ret)
		return ret;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}
static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry_v1 *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
	prt_newline(out);
}
const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_replicas_validate,
	.to_text	= bch2_sb_replicas_to_text,
};
static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *f,
					struct printbuf *err)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_cpu cpu_r;
	int ret;

	ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
	if (ret)
		return ret;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}
static void bch2_sb_replicas_v0_to_text(struct printbuf *out,
					struct bch_sb *sb,
					struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_entry_v0 *e;
	bool first = true;

	for_each_replicas_entry(sb_r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_v0_to_text(out, e);
	}
	prt_newline(out);
}
const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_replicas_v0_validate,
	.to_text	= bch2_sb_replicas_v0_to_text,
};

/* Query replicas: */
bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
			   unsigned flags, bool print)
{
	struct bch_replicas_entry_v1 *e;
	bool ret = true;

	percpu_down_read(&c->mark_lock);
	for_each_cpu_replicas_entry(&c->replicas, e) {
		unsigned i, nr_online = 0, nr_failed = 0, dflags = 0;
		bool metadata = e->data_type < BCH_DATA_user;

		if (e->data_type == BCH_DATA_cached)
			continue;

		for (i = 0; i < e->nr_devs; i++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);

			nr_online += test_bit(e->devs[i], devs.d);
			nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
		}

		if (nr_failed == e->nr_devs)
			continue;

		if (nr_online < e->nr_required)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_LOST
				: BCH_FORCE_IF_DATA_LOST;

		if (nr_online < e->nr_devs)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_DEGRADED
				: BCH_FORCE_IF_DATA_DEGRADED;

		if (dflags & ~flags) {
			if (print) {
				struct printbuf buf = PRINTBUF;

				bch2_replicas_entry_to_text(&buf, e);
				bch_err(c, "insufficient devices online (%u) for replicas entry %s",
					nr_online, buf.buf);
				printbuf_exit(&buf);
			}
			ret = false;
			break;
		}
	}
	percpu_up_read(&c->mark_lock);

	return ret;
}
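/*
 * Example: for a "user: 1/2 [0 1]" entry with device 1 offline, nr_online
 * (1) is still >= nr_required (1), so the data is degraded but not lost -
 * the check only passes if BCH_FORCE_IF_DATA_DEGRADED was set in flags.
 */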
unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
{
	struct bch_sb_field_replicas *replicas;
	struct bch_sb_field_replicas_v0 *replicas_v0;
	unsigned i, data_has = 0;

	replicas = bch2_sb_field_get(sb, replicas);
	replicas_v0 = bch2_sb_field_get(sb, replicas_v0);

	if (replicas) {
		struct bch_replicas_entry_v1 *r;

		for_each_replicas_entry(replicas, r)
			for (i = 0; i < r->nr_devs; i++)
				if (r->devs[i] == dev)
					data_has |= 1 << r->data_type;
	} else if (replicas_v0) {
		struct bch_replicas_entry_v0 *r;

		for_each_replicas_entry_v0(replicas_v0, r)
			for (i = 0; i < r->nr_devs; i++)
				if (r->devs[i] == dev)
					data_has |= 1 << r->data_type;
	}

	return data_has;
}
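/*
 * The result is a bitmask with bit (1 << data_type) set for each data type
 * the superblock says is present on this device - e.g. test
 * data_has & BIT(BCH_DATA_user) for user data.
 */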
unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned ret;

	mutex_lock(&c->sb_lock);
	ret = bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx);
	mutex_unlock(&c->sb_lock);

	return ret;
}
void bch2_fs_replicas_exit(struct bch_fs *c)
{
	unsigned i;

	kfree(c->usage_scratch);
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		free_percpu(c->usage[i]);
	kfree(c->usage_base);
	kfree(c->replicas.entries);
	kfree(c->replicas_gc.entries);

	mempool_exit(&c->replicas_delta_pool);
}
int bch2_fs_replicas_init(struct bch_fs *c)
{
	bch2_journal_entry_res_resize(&c->journal,
			&c->replicas_journal_res,
			reserve_journal_replicas(c, &c->replicas));

	return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
					 REPLICAS_DELTA_LIST_MAX) ?:
		replicas_table_update(c, &c->replicas);
}