// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "replicas.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>
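
/*
 * Simple XOR ("raid5") recovery: the parity block is the XOR of all the
 * data blocks, so any single missing block is just the XOR of the
 * remaining ones. xor_blocks() takes at most MAX_XOR_BLOCKS sources per
 * call, hence the loop.
 */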
static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}
static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np >= 3);
}
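
/*
 * Reconstruct up to two failed blocks: @ir is the sorted list of failed
 * block indices, where indices < nd are data blocks, nd is p and nd + 1
 * is q.
 */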
static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		BUG_ON(np < 2);

		if (ir[1] < nd) {
			/* data + data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			/* only p and/or q failed: regenerate them */
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}
#else

#include <raid/raid.h>

#endif
struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};
/* Stripes btree keys: */
int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
			enum bkey_invalid_flags flags,
			struct printbuf *err)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	int ret = 0;

	bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
			 bpos_gt(k.k->p, POS(0, U32_MAX)), c, err,
			 stripe_pos_bad,
			 "stripe at bad pos");

	bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err,
			 stripe_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(k.k), stripe_val_u64s(s));

	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
	return ret;
}
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
	struct bch_stripe s = {};

	memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));

	unsigned nr_data = s.nr_blocks - s.nr_redundant;

	prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
		   s.algorithm,
		   le16_to_cpu(s.sectors),
		   nr_data,
		   s.nr_redundant);
	bch2_prt_csum_type(out, s.csum_type);
	prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);

	for (unsigned i = 0; i < s.nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = sp->ptrs + i;

		if ((void *) ptr >= bkey_val_end(k))
			break;

		prt_char(out, ' ');
		bch2_extent_ptr_to_text(out, c, ptr);

		if (s.csum_type < BCH_CSUM_NR &&
		    i < nr_data &&
		    stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
			prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
	}
}
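
/*
 * Triggers: update a bucket's alloc key to reflect its membership in a
 * stripe. The checks below catch buckets shared between stripes and, on
 * deletion, buckets that weren't actually marked as belonging to the
 * stripe being deleted.
 */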
static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
					 struct bkey_s_c_stripe s,
					 unsigned idx, bool deleting)
{
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
		? BCH_DATA_parity : 0;
	s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
	int ret = 0;

	if (deleting)
		sectors = -sectors;

	a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	ret = bch2_check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
				    a->v.gen, a->v.data_type,
				    a->v.dirty_sectors);
	if (ret)
		goto err;

	if (!deleting) {
		if (bch2_trans_inconsistent_on(a->v.stripe ||
					       a->v.stripe_redundancy, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				bch2_data_type_str(a->v.data_type),
				a->v.dirty_sectors,
				a->v.stripe, s.k->p.offset)) {
			ret = -EIO;
			goto err;
		}

		if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				bch2_data_type_str(a->v.data_type),
				a->v.dirty_sectors,
				s.k->p.offset)) {
			ret = -EIO;
			goto err;
		}

		a->v.stripe		= s.k->p.offset;
		a->v.stripe_redundancy	= s.v->nr_redundant;
		a->v.data_type		= BCH_DATA_stripe;
	} else {
		if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
					       a->v.stripe_redundancy != s.v->nr_redundant, trans,
				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				s.k->p.offset, a->v.stripe)) {
			ret = -EIO;
			goto err;
		}

		a->v.stripe		= 0;
		a->v.stripe_redundancy	= 0;
		a->v.data_type		= alloc_data_type(a->v, BCH_DATA_user);
	}

	a->v.dirty_sectors += sectors;
	if (data_type)
		a->v.data_type = !deleting ? data_type : 0;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
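
/*
 * GC counterpart of the above: apply the same checks and accounting to
 * the in-memory gc bucket state instead of the alloc btree.
 */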
static int mark_stripe_bucket(struct btree_trans *trans,
			      struct bkey_s_c k,
			      unsigned ptr_idx, unsigned flags)
{
	struct bch_fs *c = trans->c;
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned nr_data = s->nr_blocks - s->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
	s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
	const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	struct bucket old, new, *g;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	/*
	 * XXX doesn't handle deletion
	 */

	percpu_down_read(&c->mark_lock);
	g = PTR_GC_BUCKET(ca, ptr);

	if (g->dirty_sectors ||
	    (g->stripe && g->stripe != k.k->p.offset)) {
		bch2_fs_inconsistent(c,
			      "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
			      ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
			      (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EINVAL;
		goto err;
	}

	bucket_lock(g);
	old = *g;

	ret = bch2_check_bucket_ref(trans, k, ptr, sectors, data_type,
				    g->gen, g->data_type,
				    g->dirty_sectors);
	if (ret)
		goto err_unlock;

	g->data_type = data_type;
	g->dirty_sectors += sectors;

	g->stripe		= k.k->p.offset;
	g->stripe_redundancy	= s->nr_redundant;
	new = *g;
err_unlock:
	bucket_unlock(g);
	if (!ret)
		bch2_dev_usage_update_m(c, ca, &old, &new);
err:
	percpu_up_read(&c->mark_lock);
	printbuf_exit(&buf);
	return ret;
}
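
/*
 * Single trigger for stripe keys, dispatching on the trigger flags:
 * BTREE_TRIGGER_TRANSACTIONAL updates alloc keys and the replicas list,
 * BTREE_TRIGGER_ATOMIC keeps the in-memory stripes radix tree and the
 * stripes heap in sync at commit time, and BTREE_TRIGGER_GC rebuilds
 * gc_stripes.
 */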
int bch2_trigger_stripe(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c old, struct bkey_s _new,
			unsigned flags)
{
	struct bkey_s_c new = _new.s_c;
	struct bch_fs *c = trans->c;
	u64 idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		/*
		 * If the pointers aren't changing, we don't need to do anything:
		 */
		if (new_s && old_s &&
		    new_s->nr_blocks	== old_s->nr_blocks &&
		    new_s->nr_redundant	== old_s->nr_redundant &&
		    !memcmp(old_s->ptrs, new_s->ptrs,
			    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
			return 0;

		BUG_ON(new_s && old_s &&
		       (new_s->nr_blocks	!= old_s->nr_blocks ||
			new_s->nr_redundant	!= old_s->nr_redundant));

		if (new_s) {
			s64 sectors = le16_to_cpu(new_s->sectors);

			struct bch_replicas_padded r;
			bch2_bkey_to_replicas(&r.e, new);
			int ret = bch2_update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
			if (ret)
				return ret;
		}

		if (old_s) {
			s64 sectors = -((s64) le16_to_cpu(old_s->sectors));

			struct bch_replicas_padded r;
			bch2_bkey_to_replicas(&r.e, old);
			int ret = bch2_update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
			if (ret)
				return ret;
		}

		unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
		for (unsigned i = 0; i < nr_blocks; i++) {
			if (new_s && old_s &&
			    !memcmp(&new_s->ptrs[i],
				    &old_s->ptrs[i],
				    sizeof(new_s->ptrs[i])))
				continue;

			if (new_s) {
				int ret = bch2_trans_mark_stripe_bucket(trans,
						bkey_s_c_to_stripe(new), i, false);
				if (ret)
					return ret;
			}

			if (old_s) {
				int ret = bch2_trans_mark_stripe_bucket(trans,
						bkey_s_c_to_stripe(old), i, true);
				if (ret)
					return ret;
			}
		}
	}

	if (flags & BTREE_TRIGGER_ATOMIC) {
		struct stripe *m = genradix_ptr(&c->stripes, idx);

		if (!m) {
			struct printbuf buf1 = PRINTBUF;
			struct printbuf buf2 = PRINTBUF;

			bch2_bkey_val_to_text(&buf1, c, old);
			bch2_bkey_val_to_text(&buf2, c, new);
			bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
					    "old %s\n"
					    "new %s", idx, buf1.buf, buf2.buf);
			printbuf_exit(&buf2);
			printbuf_exit(&buf1);
			bch2_inconsistent_error(c);
			return -1;
		}

		if (!new_s) {
			bch2_stripes_heap_del(c, m, idx);

			memset(m, 0, sizeof(*m));
		} else {
			m->sectors	= le16_to_cpu(new_s->sectors);
			m->algorithm	= new_s->algorithm;
			m->nr_blocks	= new_s->nr_blocks;
			m->nr_redundant	= new_s->nr_redundant;
			m->blocks_nonempty = 0;

			for (unsigned i = 0; i < new_s->nr_blocks; i++)
				m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);

			if (!old_s)
				bch2_stripes_heap_insert(c, m, idx);
			else
				bch2_stripes_heap_update(c, m, idx);
		}
	}

	if (flags & BTREE_TRIGGER_GC) {
		struct gc_stripe *m =
			genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);

		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				idx);
			return -BCH_ERR_ENOMEM_mark_stripe;
		}
		/*
		 * This will be wrong when we bring back runtime gc: we should
		 * be unmarking the old key and then marking the new key
		 */
		m->alive	= true;
		m->sectors	= le16_to_cpu(new_s->sectors);
		m->nr_blocks	= new_s->nr_blocks;
		m->nr_redundant	= new_s->nr_redundant;

		for (unsigned i = 0; i < new_s->nr_blocks; i++)
			m->ptrs[i] = new_s->ptrs[i];

		bch2_bkey_to_replicas(&m->r.e, new);

		/*
		 * gc recalculates this field from stripe ptr
		 * references:
		 */
		memset(m->block_sectors, 0, sizeof(m->block_sectors));

		for (unsigned i = 0; i < new_s->nr_blocks; i++) {
			int ret = mark_stripe_bucket(trans, new, i, flags);
			if (ret)
				return ret;
		}

		int ret = bch2_update_replicas(c, new, &m->r.e,
				      ((s64) m->sectors * m->nr_redundant),
				      0, true);
		if (ret) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, new);
			bch2_fs_fatal_error(c, ": no replicas entry for %s", buf.buf);
			printbuf_exit(&buf);
			return ret;
		}
	}

	return 0;
}
/* returns blocknr in stripe that we matched: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
						struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}
static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}
/* Stripe bufs: */

static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	if (buf->key.k.type == KEY_TYPE_stripe) {
		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
		unsigned i;

		for (i = 0; i < s->v.nr_blocks; i++) {
			kvfree(buf->data[i]);
			buf->data[i] = NULL;
		}
	}
}
/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < v->nr_blocks; i++) {
		buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -BCH_ERR_ENOMEM_stripe_buf;
}
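
/*
 * Checksums are kept per csum_granularity chunk within each block;
 * ec_stripe_buf_init() rounded buf->offset and buf->size to that
 * granularity, so chunk boundaries line up with buf->data.
 */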
static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset	>= end);
	BUG_ON(offset	<  buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}
static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}
static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				struct printbuf err = PRINTBUF;
				struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);

				prt_str(&err, "stripe ");
				bch2_csum_err_msg(&err, v->csum_type, want, got);
				prt_printf(&err, "  for %ps at %u of\n  ", (void *) _RET_IP_, i);
				bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
				bch_err_ratelimited(ca, "%s", err.buf);
				printbuf_exit(&err);

				clear_bit(i, buf->valid);

				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
				break;
			}

			offset += len;
		}
	}
}
/* Erasure coding: */
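
/*
 * Block layout within a stripe: the first nr_blocks - nr_redundant
 * blocks hold data, the rest hold parity (p, then q), matching what the
 * raid helpers above expect.
 */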
static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}
static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}
static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);

	return 0;
}
/* IO: */

static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca,
			       bio_data_dir(bio)
			       ? BCH_MEMBER_ERROR_write
			       : BCH_MEMBER_ERROR_read,
			       "erasure coding %s error: %s",
			       bio_data_dir(bio) ? "write" : "read",
			       bch2_blk_status_to_str(bio->bi_status)))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(ca->fs,
				    "error %s stripe: stale pointer after io",
				    bio_data_dir(bio) == READ ? "reading from" : "writing to");
		clear_bit(ec_bio->idx, ec_bio->buf->valid);
	}

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			blk_opf_t opf, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;
	int rw = op_is_write(opf);

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(c,
				    "error %s stripe: stale pointer",
				    rw == READ ? "reading from" : "writing to");
		clear_bit(idx, buf->valid);
		return;
	}

	if (!bch2_dev_get_ioref(ca, rw)) {
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
						       nr_iovecs,
						       opf,
						       GFP_KERNEL,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca			= ca;
		ec_bio->buf			= buf;
		ec_bio->idx			= idx;

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}
static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
				struct ec_stripe_buf *stripe)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       POS(0, idx), BTREE_ITER_SLOTS);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/* recovery read path: */
int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_buf *buf;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOFS);
	if (!buf)
		return -BCH_ERR_ENOMEM_ec_read_extent;

	ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
	if (ret) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: error %i looking up stripe", ret);
		kfree(buf);
		return -EIO;
	}

	v = &bkey_i_to_stripe(&buf->key)->v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: pointer doesn't match stripe");
		ret = -EIO;
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: read is bigger than stripe");
		ret = -EIO;
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret)
		goto err;

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		ret = -EIO;
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
err:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
}
/* stripe bucket accounting: */
static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

		mutex_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
			n.used = h->used;
			swap(*h, n);
		}
		mutex_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

	return 0;
}
static int ec_stripe_mem_alloc(struct btree_trans *trans,
			       struct btree_iter *iter)
{
	return allocate_dropping_locks_errcode(trans,
			__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
}
/*
 * Hash table of open stripes:
 * Stripes that are being created or modified are kept in a hash table, so that
 * stripe deletion can skip them.
 */
static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
	struct ec_stripe_new *s;

	hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
		if (s->idx == idx)
			return true;

	return false;
}

static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
	bool ret = false;

	spin_lock(&c->ec_stripes_new_lock);
	ret = __bch2_stripe_is_open(c, idx);
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}
static bool bch2_try_open_stripe(struct bch_fs *c,
				 struct ec_stripe_new *s,
				 u64 idx)
{
	bool ret;

	spin_lock(&c->ec_stripes_new_lock);
	ret = !__bch2_stripe_is_open(c, idx);
	if (ret) {
		unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));

		s->idx = idx;
		hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
	}
	spin_unlock(&c->ec_stripes_new_lock);

	return ret;
}
static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(!s->idx);

	spin_lock(&c->ec_stripes_new_lock);
	hlist_del_init(&s->hash);
	spin_unlock(&c->ec_stripes_new_lock);

	s->idx = 0;
}
/* Heap of all existing stripes, ordered by blocks_nonempty */
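
/*
 * Stripes aren't deleted as soon as they're empty: stripe_idx_to_delete()
 * just reports whether the top of the heap (the stripe with the fewest
 * nonempty blocks) is empty and not currently open; the actual delete
 * runs later from a workqueue.
 */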
static u64 stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	lockdep_assert_held(&c->ec_stripes_heap_lock);

	if (h->used &&
	    h->data[0].blocks_nonempty == 0 &&
	    !bch2_stripe_is_open(c, h->data[0].idx))
		return h->data[0].idx;

	return 0;
}
static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}
static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}
static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes, idx);

	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}
void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	heap_del(&c->ec_stripes_heap, m->heap_idx,
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
	mutex_unlock(&c->ec_stripes_heap_lock);
}
void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	mutex_lock(&c->ec_stripes_heap_lock);
	BUG_ON(heap_full(&c->ec_stripes_heap));

	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
			.idx = idx,
			.blocks_nonempty = m->blocks_nonempty,
		}),
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);
	mutex_unlock(&c->ec_stripes_heap_lock);
}
void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	bool do_deletes;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

	i = m->heap_idx;
	heap_sift_up(h,	  i, ec_stripes_heap_cmp,
		     ec_stripes_heap_set_backpointer);
	heap_sift_down(h, i, ec_stripes_heap_cmp,
		       ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);

	do_deletes = stripe_idx_to_delete(c) != 0;
	mutex_unlock(&c->ec_stripes_heap_lock);

	if (do_deletes)
		bch2_do_stripe_deletes(c);
}
/* stripe deletion */
static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_stripe s;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
			       BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
		ret = -EINVAL;
		goto err;
	}

	s = bkey_s_c_to_stripe(k);
	for (unsigned i = 0; i < s.v->nr_blocks; i++)
		if (stripe_blockcount_get(s.v, i)) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, k);
			bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
			printbuf_exit(&buf);
			ret = -EINVAL;
			goto err;
		}

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);

	while (1) {
		mutex_lock(&c->ec_stripes_heap_lock);
		u64 idx = stripe_idx_to_delete(c);
		mutex_unlock(&c->ec_stripes_heap_lock);

		if (!idx)
			break;

		int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
					ec_stripe_delete(trans, idx));
		bch_err_fn(c, ret);
		if (ret)
			break;
	}

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
void bch2_do_stripe_deletes(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
	    !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
/* stripe creation: */
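
/*
 * Stripe creation is a multi step process: allocate buckets (or reuse an
 * existing stripe's), wait for the data buckets to be written, generate
 * and write parity, insert or update the stripe key, then rewrite extents
 * in the stripe's buckets to point at it.
 */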
static int ec_stripe_key_update(struct btree_trans *trans,
				struct bkey_i_stripe *new,
				bool create)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
			       new->k.p, BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
		bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
				     create ? "creating" : "updating",
				     bch2_bkey_types[k.k->type]);
		ret = -EINVAL;
		goto err;
	}

	if (k.k->type == KEY_TYPE_stripe) {
		const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
		unsigned i;

		if (old->nr_blocks != new->v.nr_blocks) {
			bch_err(c, "error updating stripe: nr_blocks does not match");
			ret = -EINVAL;
			goto err;
		}

		for (i = 0; i < new->v.nr_blocks; i++) {
			unsigned v = stripe_blockcount_get(old, i);

			BUG_ON(v &&
			       (old->ptrs[i].dev != new->v.ptrs[i].dev ||
				old->ptrs[i].gen != new->v.ptrs[i].gen ||
				old->ptrs[i].offset != new->v.ptrs[i].offset));

			stripe_blockcount_set(&new->v, i, v);
		}
	}

	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int ec_stripe_update_extent(struct btree_trans *trans,
				   struct bpos bucket, u8 gen,
				   struct ec_stripe_buf *s,
				   struct bpos *bp_pos)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_fs *c = trans->c;
	struct bch_backpointer bp;
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_extent_ptr *ptr_c;
	struct bch_extent_ptr *ptr, *ec_ptr = NULL;
	struct bch_extent_stripe_ptr stripe_ptr;
	struct bkey_i *n;
	int ret, dev, block;

	ret = bch2_get_next_backpointer(trans, bucket, gen,
				bp_pos, &bp, BTREE_ITER_CACHED);
	if (ret)
		return ret;
	if (bpos_eq(*bp_pos, SPOS_MAX))
		return 0;

	if (bp.level) {
		struct printbuf buf = PRINTBUF;
		struct btree_iter node_iter;
		struct btree *b;

		b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
		bch2_trans_iter_exit(trans, &node_iter);

		ret = PTR_ERR_OR_ZERO(b);
		if (ret)
			return ret;

		prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
		bch2_backpointer_to_text(&buf, &bp);

		bch2_fs_inconsistent(c, "%s", buf.buf);
		printbuf_exit(&buf);
		return -EIO;
	}

	k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (ret)
		return ret;
	if (!k.k) {
		/*
		 * extent no longer exists - we could flush the btree
		 * write buffer and retry to verify, but no need:
		 */
		return 0;
	}

	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
		goto out;

	ptr_c = bkey_matches_stripe(v, k, &block);
	/*
	 * It doesn't generally make sense to erasure code cached ptrs:
	 * XXX: should we be incrementing a counter?
	 */
	if (!ptr_c || ptr_c->cached)
		goto out;

	dev = v->ptrs[block].dev;

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto out;

	bkey_reassemble(n, k);

	bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
	ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
	BUG_ON(!ec_ptr);

	stripe_ptr = (struct bch_extent_stripe_ptr) {
		.type		= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block		= block,
		.redundancy	= v->nr_redundant,
		.idx		= s->key.k.p.offset,
	};

	__extent_entry_insert(n,
			(union bch_extent_entry *) ec_ptr,
			(union bch_extent_entry *) &stripe_ptr);

	ret = bch2_trans_update(trans, &iter, n, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
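
/*
 * For each bucket in the new stripe, walk its backpointers and rewrite
 * every extent that lives in it to carry a stripe ptr, committing one
 * extent at a time.
 */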
static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
				   unsigned block)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	struct bch_extent_ptr bucket = v->ptrs[block];
	struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
	struct bpos bp_pos = POS_MIN;
	int ret = 0;

	while (1) {
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_check_rw|
				BCH_TRANS_COMMIT_no_enospc,
			ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
						s, &bp_pos));
		if (ret)
			break;
		if (bkey_eq(bp_pos, POS_MAX))
			break;

		bp_pos = bpos_nosnap_successor(bp_pos);
	}

	return ret;
}
static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret = 0;

	ret = bch2_btree_write_buffer_flush_sync(trans);
	if (ret)
		goto err;

	for (i = 0; i < nr_data; i++) {
		ret = ec_stripe_update_bucket(trans, s, i);
		if (ret)
			break;
	}
err:
	bch2_trans_put(trans);

	return ret;
}
static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
				       struct ec_stripe_new *s,
				       unsigned block,
				       struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
	int ret;

	if (!bch2_dev_get_ioref(ca, WRITE)) {
		s->err = -BCH_ERR_erofs_no_writes;
		return;
	}

	memset(s->new_stripe.data[block] + (offset << 9),
	       0,
	       ob->sectors_free << 9);

	ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
			ob->bucket * ca->mi.bucket_size + offset,
			ob->sectors_free,
			GFP_KERNEL, 0);

	percpu_ref_put(&ca->io_ref);

	if (ret)
		s->err = ret;
}
void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
{
	if (s->idx)
		bch2_stripe_close(c, s);
	kfree(s);
}
/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

	if (!s->err) {
		for (i = 0; i < nr_data; i++)
			if (s->blocks[i]) {
				ob = c->open_buckets + s->blocks[i];

				if (ob->sectors_free)
					zero_out_rest_of_ec_bucket(c, s, i, ob);
			}
	}

	if (s->err) {
		if (!bch2_err_matches(s->err, EROFS))
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);
	BUG_ON(!s->idx);

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		goto err;
	}

	ret = bch2_trans_do(c, &s->res, NULL,
			    BCH_TRANS_COMMIT_no_check_rw|
			    BCH_TRANS_COMMIT_no_enospc,
			    ec_stripe_key_update(trans,
					bkey_i_to_stripe(&s->new_stripe.key),
					!s->have_existing_stripe));
	bch_err_msg(c, ret, "creating stripe key");
	if (ret)
		goto err;

	ret = ec_stripe_update_extents(c, &s->new_stripe);
	bch_err_msg(c, ret, "error updating extents");
	if (ret)
		goto err;
err:
	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	mutex_lock(&c->ec_stripe_new_lock);
	list_del(&s->list);
	mutex_unlock(&c->ec_stripe_new_lock);
	wake_up(&c->ec_stripe_new_wait);

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);

	ec_stripe_new_put(c, s, STRIPE_REF_stripe);
}
static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
{
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->ref[STRIPE_REF_io]))
			goto out;
	s = NULL;
out:
	mutex_unlock(&c->ec_stripe_new_lock);

	return s;
}
static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
		struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s;

	while ((s = get_pending_stripe(c)))
		ec_stripe_create(s);

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}
void bch2_ec_do_stripe_creates(struct bch_fs *c)
{
	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);

	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}
static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s, STRIPE_REF_io);
}
void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}
void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct bch_dev *ca;
	unsigned offset;

	if (!ob)
		return NULL;

	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);

	ca	= bch_dev_bkey_exists(c, ob->dev);
	offset	= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}
static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}
/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(c, ca, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (unsigned i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}
static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}
static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i *k,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size)
{
	struct bkey_i_stripe *s = bkey_stripe_init(k);
	unsigned u64s;

	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
	s->v.csum_type			= BCH_CSUM_crc32c;
	s->v.pad			= 0;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}
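
/*
 * An ec_stripe_new tracks an in-flight stripe creation: it starts out
 * holding one reference for the stripe itself and one for outstanding io.
 */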
static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->ref[STRIPE_REF_stripe], 1);
	atomic_set(&s->ref[STRIPE_REF_io], 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	ec_stripe_key_init(c, &s->new_stripe.key,
			   s->nr_data, s->nr_parity, h->blocksize);

	h->s = s;
	return 0;
}
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy,
			 enum bch_watermark watermark)
{
	struct ec_stripe_head *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	BUG_ON(!mutex_trylock(&h->lock));

	h->target	= target;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->watermark	= watermark;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, target);

	for_each_member_device_rcu(c, ca, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(ca->dev_idx, h->devs.d);

	h->blocksize = pick_blocksize(c, &h->devs);

	for_each_member_device_rcu(c, ca, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();

	/*
	 * If we only have redundancy + 1 devices, we're better off with just
	 * replication:
	 */
	if (h->nr_active_devs < h->redundancy + 2)
		bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?",
			h->nr_active_devs, h->redundancy + 2);

	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}
void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_set_pending(c, h);

	mutex_unlock(&h->lock);
}
static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
			  unsigned target,
			  unsigned algo,
			  unsigned redundancy,
			  enum bch_watermark watermark)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	int ret;

	if (!redundancy)
		return NULL;

	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
	if (ret)
		return ERR_PTR(ret);

	if (test_bit(BCH_FS_going_ro, &c->flags)) {
		h = ERR_PTR(-BCH_ERR_erofs_no_writes);
		goto found;
	}

	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->watermark	== watermark) {
			ret = bch2_trans_mutex_lock(trans, &h->lock);
			if (ret)
				h = ERR_PTR(ret);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
found:
	if (!IS_ERR_OR_NULL(h) &&
	    h->nr_active_devs < h->redundancy + 2) {
		mutex_unlock(&h->lock);
		h = NULL;
	}
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}
static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
				    enum bch_watermark watermark, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	int ret = 0;

	BUG_ON(v->nr_blocks	!= h->s->nr_data + h->s->nr_parity);
	BUG_ON(v->nr_redundant	!= h->s->nr_parity);

	for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
		__clear_bit(v->ptrs[i].dev, devs.d);
		if (i < h->s->nr_data)
			nr_have_data++;
		else
			nr_have_parity++;
	}

	BUG_ON(nr_have_data	> h->s->nr_data);
	BUG_ON(nr_have_parity	> h->s->nr_parity);

	buckets.nr = 0;
	if (nr_have_parity < h->s->nr_parity) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->parity_stripe,
					    &devs,
					    h->s->nr_parity,
					    &nr_have_parity,
					    &have_cache, 0,
					    BCH_DATA_parity,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data + h->s->nr_parity,
					       h->s->nr_data);
			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);

			h->s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	buckets.nr = 0;
	if (nr_have_data < h->s->nr_data) {
		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
					    &h->block_stripe,
					    &devs,
					    h->s->nr_data,
					    &nr_have_data,
					    &have_cache, 0,
					    BCH_DATA_user,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data, 0);
			BUG_ON(j >= h->s->nr_data);

			h->s->blocks[j] = buckets.v[i];
			v->ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	return 0;
}
/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
			       struct ec_stripe_head *head)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;
	s64 ret = -1;

	if (may_create_new_stripe(c))
		return -1;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
		/* No blocks worth reusing, stripe will just be deleted: */
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;

		m = genradix_ptr(&c->stripes, stripe_idx);

		if (m->algorithm	== head->algo &&
		    m->nr_redundant	== head->redundancy &&
		    m->sectors		== head->blocksize &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant &&
		    bch2_try_open_stripe(c, head->s, stripe_idx)) {
			ret = stripe_idx;
			break;
		}
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
	return ret;
}
static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
	struct bch_stripe *existing_v;
	unsigned i;
	s64 idx;
	int ret;

	/*
	 * If we can't allocate a new stripe, and there's no stripes with empty
	 * blocks for us to reuse, that means we have to wait on copygc:
	 */
	idx = get_existing_stripe(c, h);
	if (idx < 0)
		return -BCH_ERR_stripe_alloc_blocked;

	ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
	bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c,
			     "reading stripe key: %s", bch2_err_str(ret));
	if (ret) {
		bch2_stripe_close(c, h->s);
		return ret;
	}

	existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;

	BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
	h->s->nr_data = existing_v->nr_blocks -
		existing_v->nr_redundant;

	ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
	if (ret) {
		bch2_stripe_close(c, h->s);
		return ret;
	}

	BUG_ON(h->s->existing_stripe.size != h->blocksize);
	BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));

	/*
	 * Free buckets we initially allocated - they might conflict with
	 * blocks from the stripe we're reusing:
	 */
	for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
		bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
		h->s->blocks[i] = 0;
	}
	memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
	memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));

	for (i = 0; i < existing_v->nr_blocks; i++) {
		if (stripe_blockcount_get(existing_v, i)) {
			__set_bit(i, h->s->blocks_gotten);
			__set_bit(i, h->s->blocks_allocated);
		}

		ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
	}

	bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
	h->s->have_existing_stripe = true;

	return 0;
}
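
/*
 * Reserving a slot in the stripes btree: scan for an empty slot starting
 * from the last allocated index (c->ec_stripe_hint), wrapping around to
 * the start once, and take a disk reservation for the parity blocks up
 * front.
 */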
static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	if (!h->s->res.sectors) {
		ret = bch2_disk_reservation_get(c, &h->s->res,
					h->blocksize,
					h->s->nr_parity,
					BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			return ret;
	}

	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(&iter, start_pos);
				continue;
			}

			ret = -BCH_ERR_ENOSPC_stripe_create;
			break;
		}

		if (bkey_deleted(k.k) &&
		    bch2_try_open_stripe(c, h->s, k.k->p.offset))
			break;
	}

	c->ec_stripe_hint = iter.pos.offset;

	if (ret)
		goto err;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret) {
		bch2_stripe_close(c, h->s);
		goto err;
	}

	h->s->new_stripe.key.k.p = iter.pos;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
err:
	bch2_disk_reservation_put(c, &h->s->res);
	goto out;
}
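
/*
 * Fallback ladder for getting a stripe to write to: first try to allocate
 * a full new stripe, then fall back to reusing an existing stripe's empty
 * blocks, retrying with the caller's watermark and blocking on @cl if
 * we'd otherwise have to wait on copygc.
 */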
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       enum bch_watermark watermark,
					       struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
	bool waiting = false;
	int ret;

	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
	if (IS_ERR_OR_NULL(h))
		return h;

	if (!h->s) {
		ret = ec_new_stripe_alloc(c, h);
		if (ret) {
			bch_err(c, "failed to allocate new stripe");
			goto err;
		}
	}

	if (h->s->allocated)
		goto allocated;

	if (h->s->have_existing_stripe)
		goto alloc_existing;

	/* First, try to allocate a full stripe: */
	ret =   new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
		__bch2_ec_stripe_head_reserve(trans, h);
	if (!ret)
		goto allocate_buf;
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    bch2_err_matches(ret, ENOMEM))
		goto err;

	/*
	 * Not enough buckets available for a full stripe: we must reuse an
	 * existing stripe:
	 */
	while (1) {
		ret = __bch2_ec_stripe_head_reuse(trans, h);
		if (!ret)
			break;
		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
			goto err;

		if (watermark == BCH_WATERMARK_copygc) {
			ret =   new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
				__bch2_ec_stripe_head_reserve(trans, h);
			if (ret)
				goto err;
			goto allocate_buf;
		}

		/* XXX freelist_wait? */
		closure_wait(&c->freelist_wait, cl);
		waiting = true;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc_existing:
	/*
	 * Retry allocating buckets, with the watermark for this
	 * particular write:
	 */
	ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
	if (ret)
		goto err;
allocate_buf:
	ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
	if (ret)
		goto err;

	h->s->allocated = true;
allocated:
	BUG_ON(!h->s->idx);
	BUG_ON(!h->s->new_stripe.data[0]);
	BUG_ON(trans->restarted);
	return h;
err:
	bch2_ec_stripe_head_put(c, h);
	return ERR_PTR(ret);
}
static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		if (!ca)
			goto found;

		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		h->s->err = -BCH_ERR_erofs_no_writes;
		ec_stripe_set_pending(c, h);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}
void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	__bch2_ec_stop(c, ca);
}

void bch2_fs_ec_stop(struct bch_fs *c)
{
	__bch2_ec_stop(c, NULL);
}
static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
	bool ret;

	mutex_lock(&c->ec_stripe_new_lock);
	ret = list_empty(&c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	return ret;
}
void bch2_fs_ec_flush(struct bch_fs *c)
{
	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
}
int bch2_stripes_read(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
				   BTREE_ITER_PREFETCH, k, ({
			if (k.k->type != KEY_TYPE_stripe)
				continue;

			ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
			if (ret)
				break;

			const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

			struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
			m->sectors	= le16_to_cpu(s->sectors);
			m->algorithm	= s->algorithm;
			m->nr_blocks	= s->nr_blocks;
			m->nr_redundant	= s->nr_redundant;
			m->blocks_nonempty = 0;

			for (unsigned i = 0; i < s->nr_blocks; i++)
				m->blocks_nonempty += !!stripe_blockcount_get(s, i);

			bch2_stripes_heap_insert(c, m, k.k->p.offset);
			0;
		})));
	bch_err_fn(c, ret);
	return ret;
}
void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	mutex_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->used, 50); i++) {
		m = genradix_ptr(&c->stripes, h->data[i].idx);

		prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
		       h->data[i].blocks_nonempty,
		       m->nr_blocks - m->nr_redundant,
		       m->nr_redundant);
		if (bch2_stripe_is_open(c, h->data[i].idx))
			prt_str(out, " open");
		prt_newline(out);
	}
	mutex_unlock(&c->ec_stripes_heap_lock);
}
void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		prt_printf(out, "target %u algo %u redundancy %u %s:\n",
		       h->target, h->algo, h->redundancy,
		       bch2_watermarks[h->watermark]);

		if (h->s)
			prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
			       h->s->idx, h->s->nr_data, h->s->nr_parity,
			       bitmap_weight(h->s->blocks_allocated,
					     h->s->nr_data));
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	prt_printf(out, "in flight:\n");

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list) {
		prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
			   s->idx, s->nr_data, s->nr_parity,
			   atomic_read(&s->ref[STRIPE_REF_io]),
			   atomic_read(&s->ref[STRIPE_REF_stripe]),
			   bch2_watermarks[s->h->watermark]);
	}
	mutex_unlock(&c->ec_stripe_new_lock);
}
void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;
	unsigned i;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_first_entry_or_null(&c->ec_stripe_head_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_stripe_head_lock);
		if (!h)
			break;

		if (h->s) {
			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
				BUG_ON(h->s->blocks[i]);

			kfree(h->s);
		}
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes);
	bioset_exit(&c->ec_bioset);
}
void bch2_fs_ec_init_early(struct bch_fs *c)
{
	spin_lock_init(&c->ec_stripes_new_lock);
	mutex_init(&c->ec_stripes_heap_lock);

	INIT_LIST_HEAD(&c->ec_stripe_head_list);
	mutex_init(&c->ec_stripe_head_lock);

	INIT_LIST_HEAD(&c->ec_stripe_new_list);
	mutex_init(&c->ec_stripe_new_lock);
	init_waitqueue_head(&c->ec_stripe_new_wait);

	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
}
int bch2_fs_ec_init(struct bch_fs *c)
{
	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}